Example #1
func TestInvalidData(t *testing.T) {
	expect(t, func() {
		log := NewLogger(SkipStackFrames(-1))
		log.Info("foo", "a")
		log.Event("")
		log.Event("f\noo:")
		log.Event("foo:")
		log.Events("foo=", 2)
		log.Gauge("\tfoo", 2)
		log.Value("=foo", 2)
		log.NewTiming().Say(":foo")
		log.Info("foo", "a")
		log.Info("foo", true, "foo")
		log.Info("foo", "foo\t", "bar")
	}, []string{
		"ERROR " + errOddNumArgs.Error(),
		"INFO  foo",
		"ERROR " + errKeyEmpty.Error(),
		"ERROR " + errKeyInvalid.Error(),
		"ERROR " + errKeyInvalid.Error(),
		"ERROR " + errKeyInvalid.Error(),
		"ERROR " + errKeyInvalid.Error(),
		"ERROR " + errKeyInvalid.Error(),
		"ERROR " + errKeyInvalid.Error(),
		"ERROR " + errOddNumArgs.Error(),
		"INFO  foo",
		"ERROR " + errKeyNotString.Error(),
		"INFO  foo",
		"ERROR " + errKeyInvalid.Error(),
		"INFO  foo",
	})
}
Example #2
func (e *Exporter) exportImage(name string, artifact *ct.Artifact) error {
	log := e.log.New("name", name)

	for _, rootfs := range artifact.Manifest().Rootfs {
		for _, layer := range rootfs.Layers {
			log.Info("exporting layer", "id", layer.ID)
			if err := e.exportLayer(layer); err != nil {
				log.Error("error exporting layer", "id", layer.ID, "err", err)
				return err
			}
		}
	}

	path := e.imagePath(artifact.Manifest().ID())
	if _, err := os.Stat(path); err == nil {
		log.Info("manifest already exists")
		return nil
	}

	log.Info("writing image manifest", "path", path)
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		log.Error("error writing image manifest", "path", path, "err", err)
		return err
	}
	if err := ioutil.WriteFile(path, artifact.RawManifest, 0644); err != nil {
		log.Error("error writing image manifest", "path", path, "err", err)
		return err
	}

	return nil
}
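The e.log.New("name", name) call above derives a child logger whose context keys are attached to every subsequent record. A minimal self-contained sketch of that pattern, assuming the log15 package this code appears to use (the import path is a guess):

package main

import log15 "gopkg.in/inconshreveable/log15.v2"

func main() {
	// Child logger: the "name" key now rides along on every record it emits.
	log := log15.New("name", "ubuntu-14.04")
	log.Info("exporting layer", "id", "layer0") // extra key/value pairs follow the message
}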
Example #3
func babySit(process *os.Process) int {
	log := logger.New("fn", "babySit")

	// Forward all signals to the app
	sigchan := make(chan os.Signal, 1)
	sigutil.CatchAll(sigchan)
	go func() {
		for sig := range sigchan {
			log.Info("received signal", "type", sig)
			if sig == syscall.SIGCHLD {
				continue
			}
			log.Info("forwarding signal to command", "type", sig)
			process.Signal(sig)
		}
	}()

	// Wait for the app to exit.  Also, as pid 1 it's our job to reap all
	// orphaned zombies.
	var wstatus syscall.WaitStatus
	for {
		pid, err := syscall.Wait4(-1, &wstatus, 0, nil)
		if err == nil && pid == process.Pid {
			break
		}
	}

	if wstatus.Signaled() {
		log.Info("command exited due to signal")
		return 0
	}
	return wstatus.ExitStatus()
}
Example #4
func (c *ContainerInit) StreamState(arg struct{}, stream rpcplus.Stream) error {
	log := logger.New("fn", "StreamState")
	log.Info("starting to stream state")

	ch := make(chan StateChange)
	c.streamsMtx.Lock()
	c.mtx.Lock()
	select {
	case stream.Send <- StateChange{State: c.state, Error: c.error, ExitStatus: c.exitStatus}:
		log.Info("sent initial state")
	case <-stream.Error:
		c.mtx.Unlock()
		c.streamsMtx.Unlock()
		return nil
	}
	c.mtx.Unlock()
	c.streams[ch] = struct{}{}
	c.streamsMtx.Unlock()
	defer func() {
		log.Info("cleanup")
		go func() {
			// drain to prevent deadlock while removing the listener
			for range ch {
			}
		}()
		c.streamsMtx.Lock()
		delete(c.streams, ch)
		c.streamsMtx.Unlock()
		close(ch)
	}()

	log.Info("waiting for state changes")
	for {
		select {
		case change := <-ch:
			select {
			case stream.Send <- change:
				log.Info("sent state change", "state", change.State)
			case <-stream.Error:
				return nil
			}
		case <-stream.Error:
			return nil
		}
	}
}
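The deferred cleanup above drains ch in a background goroutine so a broadcaster blocked sending on it can finish and release its lock before the channel is removed and closed. A stripped-down sketch of that drain-before-delete pattern (all names hypothetical):

package main

import "sync"

// broadcaster fans values out to subscriber channels.
type broadcaster struct {
	mtx  sync.Mutex
	subs map[chan int]struct{}
}

func (b *broadcaster) publish(v int) {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	for ch := range b.subs {
		ch <- v // blocks until the subscriber (or its drain goroutine) receives
	}
}

func (b *broadcaster) unsubscribe(ch chan int) {
	// Drain in the background so a publish holding the lock can complete
	// its send instead of deadlocking against us while we wait for the lock.
	go func() {
		for range ch {
		}
	}()
	b.mtx.Lock()
	delete(b.subs, ch)
	b.mtx.Unlock()
	close(ch)
}

func main() {
	b := &broadcaster{subs: make(map[chan int]struct{})}
	ch := make(chan int)
	b.subs[ch] = struct{}{}
	go b.publish(42)
	b.unsubscribe(ch)
}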
Example #5
// Home handles a web server request for a page.
func Home(w http.ResponseWriter, req *http.Request) {
	io.WriteString(w, "Hello World!")
	log := logrus.New()
	hook, err := logrus_syslog.NewSyslogHook("udp", os.Getenv("SYSLOG_SERVER")+":"+os.Getenv("SYSLOG_PORT"), syslog.LOG_INFO, "")
	if err == nil {
		log.Hooks.Add(hook)
	}
	log.Info(req, "\n")
}
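This handler constructs a fresh logger and redials the syslog hook on every request. A common refinement is to set both up once at startup; a sketch assuming the same logrus and logrus_syslog packages (import paths are a guess):

package main

import (
	"io"
	"log/syslog"
	"net/http"
	"os"

	"github.com/sirupsen/logrus"
	logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
)

var log = logrus.New()

func init() {
	// Attach the syslog hook once; building it per request repeats the
	// UDP dial and hook allocation needlessly.
	hook, err := logrus_syslog.NewSyslogHook("udp",
		os.Getenv("SYSLOG_SERVER")+":"+os.Getenv("SYSLOG_PORT"),
		syslog.LOG_INFO, "")
	if err == nil {
		log.Hooks.Add(hook)
	}
}

func Home(w http.ResponseWriter, req *http.Request) {
	io.WriteString(w, "Hello World!")
	log.WithField("path", req.URL.Path).Info("handled request")
}

func main() {
	http.HandleFunc("/", Home)
	http.ListenAndServe(":8080", nil)
}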
Example #6
func TestLogCalldepth(t *testing.T) {
	buf := &bytes.Buffer{}
	SetBackend(NewLogBackend(buf, "", log.Lshortfile))

	log := MustGetLogger("test")
	log.Info("test filename")

	// Verify that the correct filename is registered
	if !strings.HasPrefix(buf.String(), "log_test.go:") {
		t.Errorf("incorrect filename: %s", buf.String())
	}
}
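The assertion works because the backend delegates to the standard library logger, whose log.Lshortfile flag stamps each record with the caller's file and line. A stdlib-only sketch of that flag:

package main

import (
	"bytes"
	"fmt"
	"log"
)

func main() {
	buf := &bytes.Buffer{}
	l := log.New(buf, "", log.Lshortfile) // prefix each record with the caller's file:line
	l.Print("test filename")
	fmt.Print(buf.String()) // e.g. "main.go:12: test filename"
}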
Example #7
func export(args *docopt.Args) {
	log := log15.New()

	log.Info("decoding manifest")
	var manifest map[string]*ct.Artifact
	if err := cliutil.DecodeJSONArg(args.String["<manifest>"], &manifest); err != nil {
		log.Error("error decoding manifest", "err", err)
		os.Exit(1)
	}

	exporter := Exporter{
		dir: args.String["<dir>"],
		log: log15.New(),
	}

	log.Info(fmt.Sprintf("exporting %d images to %s", len(manifest), exporter.dir))
	if err := exporter.Export(manifest); err != nil {
		log.Error("error exporting images", "err", err)
		os.Exit(1)
	}
}
Example #8
func TestRedacted(t *testing.T) {
	s := sensitive{"dupa55", "normal"}
	var _ Redactor = s
	for _, format := range []string{"%s", "%v", "%#v", "%q"} {
		b := new(bytes.Buffer)
		log := New(b, "test", DEBUG)
		log.Info(format, s)

		if logged := b.String(); strings.Contains(logged, s.Password) {
			t.Errorf("Not redacted: %#v in %#v.", s.Password, logged)
		}
	}
}
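The test depends on sensitive satisfying a Redactor interface so no fmt verb leaks the password. One way such a type can behave is by implementing fmt.Stringer and fmt.GoStringer, which %s, %v, %q and %#v all consult; this is a sketch of the idea, not necessarily how this library defines Redactor:

package main

import "fmt"

// sensitive hides its password under every fmt verb by implementing
// fmt.Stringer (used by %s, %v, %q) and fmt.GoStringer (used by %#v).
type sensitive struct {
	Password string
	Note     string
}

func (s sensitive) String() string   { return "sensitive{REDACTED}" }
func (s sensitive) GoString() string { return "sensitive{REDACTED}" }

func main() {
	s := sensitive{"dupa55", "normal"}
	fmt.Printf("%s %v %#v %q\n", s, s, s, s) // no verb prints "dupa55"
}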
Example #9
func makeRequest(addr string, t *testing.T) {
	log.Info("Sending Request: %s", addr)
	res, err := http.Get(addr)

	if err != nil {
		t.Fatalf("Error occurred: %+s", err)
	}
	defer res.Body.Close()

	body, err := ioutil.ReadAll(res.Body)

	if err != nil {
		t.Fatalf("Error occurred while reading body: %+s", err)
	}

	t.Logf("Body response: %s", body)
}
Example #10
func logBroadcast(event *discoverd.Event) {
	log := logger.New("fn", "broadcast")
	ctx := []interface{}{
		"event", event.Kind,
		"service", event.Service,
	}
	if event.Instance != nil {
		ctx = append(ctx, []interface{}{
			"instance.id", event.Instance.ID,
			"instance.addr", event.Instance.Addr,
		}...)
	}
	if event.ServiceMeta != nil {
		ctx = append(ctx, []interface{}{"service_meta.index", event.ServiceMeta.Index, "service_meta.data", string(event.ServiceMeta.Data)}...)
	}
	log.Info(fmt.Sprintf("broadcasting %s event", event.Kind), ctx...)
}
Example #11
func cli_db(fn func(*Db) error) func(*cli.Context) {
	return func(c *cli.Context) {
		env := cmd.Env(c)
		log := logging.Open(env)
		db, err := Open(env)
		if err == nil {
			db.Logger = log
			err = fn(db)
		}

		if err == nil {
			log.Info("Done!")
		} else {
			log.Error(err.Error())
		}
	}
}
Example #12
func TestLogCalldepth(t *testing.T) {
	buf := &bytes.Buffer{}
	SetBackend(NewLogBackend(buf, "", log.Lshortfile))
	SetFormatter(MustStringFormatter("%{shortfile} %{level} %{message}"))

	log := MustGetLogger("test")
	log.Info("test filename")

	parts := strings.SplitN(buf.String(), " ", 2)

	// Verify that the correct filename is registered by the stdlib logger
	if !strings.HasPrefix(parts[0], "log_test.go:") {
		t.Errorf("incorrect filename: %s", parts[0])
	}
	// Verify that the correct filename is registered by go-logging
	if !strings.HasPrefix(parts[1], "log_test.go:") {
		t.Errorf("incorrect filename: %s", parts[1])
	}
}
Example #13
// This is a simple stand-in for the Apple feedback service that
// can be used for testing purposes.
func mockServer(cert, key string) {
	crt, err := tls.X509KeyPair([]byte(cert), []byte(key))
	if err != nil {
		log.Fatal(err)
	}
	config := tls.Config{Certificates: []tls.Certificate{crt}, ClientAuth: tls.RequireAnyClientCert}
	log.Info("- starting Mock Apple TCP server at " + APNSGateway)

	srv, err := tls.Listen("tcp", APNSGateway, &config)
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := srv.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go loop(conn)
	}
}
Example #14
func main() {
	m := multiconfig.New()
	conf := new(Config)
	m.MustLoad(conf)

	log := logging.NewLogger("webhook")

	conf.PublicURL = fmt.Sprintf("%s%s", conf.PublicURL, proxyURL)
	sf := services.NewServices()
	RegisterServices(sf, conf)

	h := integration.NewHandler(log, sf)

	mux := http.NewServeMux()
	mux.Handle("/{name}/{token}", h)
	mux.HandleFunc("/configure/{name}", h.Configure)

	log.Info("Integration server started")
	if err := http.ListenAndServe(conf.Addr, mux); err != nil {
		log.Fatal("Could not initialize server: %s", err)
	}
}
Example #15
File: veb.go Project: spydez/veb
// Command veb to do things with your stuff!
// See all the commands in const, above. Also flags.
// 'veb help' for a pretty print of flags/commands on command line.
func main() {
	out := log.New(os.Stdout, "", 0)

	// define flags
	// TODO
	//  - max CPUs
	//  - verbose

	// parse flags & args
	flag.Parse()

	// set up goroutine parallelization
	// cuz it just runs on one processor out of the box...
	NUM_CPUS := runtime.NumCPU()
	runtime.GOMAXPROCS(NUM_CPUS)
	MAX_HANDLERS = NUM_CPUS
	if NUM_CPUS > 4 {
		// TODO: is running on all procs a good idea? (will it starve the system?)
		// using half for now
		// probably go back to all when niced.
		MAX_HANDLERS /= 2
	}

	// sanity check
	if len(flag.Args()) == 0 {
		// TODO
		out.Fatal("INSERT HELP HERE")
	}

	// init is a bit different (no pre-existing index),
	// so take care of it here instead of inside switch
	if flag.Args()[0] == INIT {
		err := Init()
		if err != nil {
			out.Fatal(err)
		}
		pwd, _ := os.Getwd()
		out.Println("Initialized empty veb repository at", pwd)
		return // done
	}

	// find veb repo
	root, err := cdBaseDir()
	if err != nil {
		fmt.Println(err, "\n")
		out.Fatal("Use 'veb init' to create this veb repository.")
	}

	// make the logger
	logf, err := os.OpenFile(path.Join(root, veb.META_FOLDER, veb.LOG_FILE),
		os.O_RDWR|os.O_APPEND|os.O_CREATE, 0644)
	if err != nil {
		out.Fatal("veb could not open log file: ", err)
	}
	log := veb.NewLog(log.New(logf, "", log.LstdFlags|log.Lshortfile))
	defer log.Info().Println("done\n\n")

	// load the index
	index, err := veb.Load(root, log)
	if err != nil {
		out.Fatal("veb could not load index")
	}

	// print intro
	fmt.Println("veb repository at", root, "\n")

	// act on command
	switch flag.Args()[0] {
	case STATUS:
		err = Status(index, log)
		if err != nil {
			out.Fatal(err)
		}

	case VERIFY:
		err = Verify(index, log)
		if err != nil {
			out.Fatal(err)
		}

	case COMMIT:
		err = Commit(index, log)
		if err != nil {
			out.Fatal(err)
		}

	case REMOTE:
		if len(flag.Args()) < 2 {
			out.Fatal(REMOTE, " needs a path to the backup repository",
				"\n  e.g. 'veb remote ~/backups/music'")
		}
		err = Remote(index, flag.Args()[1], log)
		if err != nil {
			out.Fatal(err)
		}

	case PUSH:
		err = Push(index, log)
		if err != nil {
			out.Fatal(err)
		}

	case FIX:
		// TODO: implement
		out.Fatal("this command is not yet implemented")
	case PULL:
		// TODO: implement
		out.Fatal("this command is not yet implemented")
	case SYNC:
		// TODO: implement
		out.Fatal("this command is not yet implemented")
	case HELP:
		// may provide per-cmd help later, but for now, just the default help
		// TODO: per command help
		fallthrough
	default:
		// TODO
		fmt.Println("INSERT HELP HERE")
	}
}
Example #16
// Run as pid 1 and monitor the contained process to return its exit code.
func containerInitApp(c *Config, logFile *os.File) error {
	log := logger.New("fn", "containerInitApp")

	init := newContainerInit(c, logFile)
	log.Info("registering RPC server")
	if err := rpcplus.Register(init); err != nil {
		log.Error("error registering RPC server", "err", err)
		return err
	}
	init.mtx.Lock()
	defer init.mtx.Unlock()

	// Prepare the cmd based on the given args
	// If this fails we report that below
	cmdPath, cmdErr := getCmdPath(c)
	cmd := exec.Command(cmdPath, c.Args[1:]...)
	cmd.Dir = c.WorkDir

	cmd.Env = make([]string, 0, len(c.Env))
	for k, v := range c.Env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}

	// App runs in its own session
	cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}

	// Console setup.  Hook up the container app's stdin/stdout/stderr to
	// either a pty or pipes.  The FDs for the controlling side of the
	// pty/pipes will be passed to flynn-host later via a UNIX socket.
	if c.TTY {
		log.Info("creating PTY")
		ptyMaster, ptySlave, err := pty.Open()
		if err != nil {
			log.Info("error creating PTY", "err", err)
			return err
		}
		init.ptyMaster = ptyMaster
		cmd.Stdout = ptySlave
		cmd.Stderr = ptySlave
		if c.OpenStdin {
			log.Info("attaching stdin to PTY")
			cmd.Stdin = ptySlave
			cmd.SysProcAttr.Setctty = true
		}
	} else {
		// we use syscall.Socketpair (rather than cmd.StdoutPipe) to make it easier
		// for flynn-host to do non-blocking I/O (via net.FileConn) so that no
		// read(2) calls can succeed after closing the logs during an update.
		log.Info("creating stdout pipe")
		var err error
		cmd.Stdout, init.stdout, err = newSocketPair("stdout")
		if err != nil {
			log.Error("error creating stdout pipe", "err", err)
			return err
		}

		log.Info("creating stderr pipe")
		cmd.Stderr, init.stderr, err = newSocketPair("stderr")
		if err != nil {
			log.Error("error creating stderr pipe", "err", err)
			return err
		}

		log.Info("creating FD proxies")
		if err := createFDProxies(cmd); err != nil {
			log.Error("error creating FD proxies", "err", err)
			return err
		}

		if c.OpenStdin {
			// Can't use cmd.StdinPipe() here, since in Go 1.2 it
			// returns an io.WriteCloser with the underlying object
			// being an *exec.closeOnce, neither of which provides
			// a way to convert to an FD.
			log.Info("creating stdin pipe")
			pipeRead, pipeWrite, err := os.Pipe()
			if err != nil {
				log.Error("creating stdin pipe", "err", err)
				return err
			}
			cmd.Stdin = pipeRead
			init.stdin = pipeWrite
		}
	}

	go runRPCServer()

	// Wait for flynn-host to tell us to start
	init.mtx.Unlock() // Allow calls
	log.Info("waiting to be resumed")
	<-init.resume
	log.Info("resuming")
	init.mtx.Lock()

	if cmdErr != nil {
		log.Error("command failed", "err", cmdErr)
		init.changeState(StateFailed, cmdErr.Error(), -1)
		init.exit(1)
	}
	// Container setup
	log.Info("setting up the container")
	if err := setupCommon(c, log); err != nil {
		log.Error("error setting up the container", "err", err)
		init.changeState(StateFailed, err.Error(), -1)
		init.exit(1)
	}
	// Start the app
	log.Info("starting the command")
	if err := cmd.Start(); err != nil {
		log.Error("error starting the command", "err", err)
		init.changeState(StateFailed, err.Error(), -1)
		init.exit(1)
	}
	log.Info("setting state to running")
	init.process = cmd.Process
	init.changeState(StateRunning, "", -1)

	init.mtx.Unlock() // Allow calls
	// monitor services
	hbs := make([]discoverd.Heartbeater, 0, len(c.Ports))
	for _, port := range c.Ports {
		if port.Service == nil {
			continue
		}
		log = log.New("service", port.Service.Name, "port", port.Port, "proto", port.Proto)
		log.Info("monitoring service")
		hb, err := monitor(port, init, c.Env, log)
		if err != nil {
			log.Error("error monitoring service", "err", err)
			os.Exit(70)
		}
		hbs = append(hbs, hb)
	}
	exitCode := babySit(init.process)
	log.Info("command exited", "status", exitCode)
	init.mtx.Lock()
	for _, hb := range hbs {
		hb.Close()
	}
	init.changeState(StateExited, "", exitCode)
	init.mtx.Unlock() // Allow calls

	log.Info("exiting")
	init.exit(exitCode)
	return nil
}
Example #17
func ExampleNewLogger() {
	say.SetData("weather", "sunny")
	log := say.NewLogger()
	log.Info("hello") // INFO  hello	| weather="sunny"
}
Example #18
// AmqpChannel is the same as amqpConnect in boulder, but with even
// more aggressive error dropping
func AmqpChannel(conf Config) (*amqp.Channel, error) {
	var conn *amqp.Connection
	var err error

	log := blog.GetAuditLogger()

	if conf.AMQP.TLS == nil {
		// Configuration did not specify TLS options, but Dial will
		// use TLS anyway if the URL scheme is "amqps"
		conn, err = amqp.Dial(conf.AMQP.Server)

	} else {
		// They provided TLS options, so let's load them.
		log.Info("AMQPS: Loading TLS Options.")

		if !strings.HasPrefix(conf.AMQP.Server, "amqps") {
			err = fmt.Errorf("AMQPS: TLS configuration provided, but not using an AMQPS URL")
			return nil, err
		}

		cfg := new(tls.Config)

		// If the configuration specified a certificate (or key), load them
		if conf.AMQP.TLS.CertFile != nil || conf.AMQP.TLS.KeyFile != nil {
			// But they have to give both.
			if conf.AMQP.TLS.CertFile == nil || conf.AMQP.TLS.KeyFile == nil {
				err = fmt.Errorf("AMQPS: You must set both of the configuration values AMQP.TLS.KeyFile and AMQP.TLS.CertFile.")
				return nil, err
			}

			cert, err := tls.LoadX509KeyPair(*conf.AMQP.TLS.CertFile, *conf.AMQP.TLS.KeyFile)
			if err != nil {
				err = fmt.Errorf("AMQPS: Could not load Client Certificate or Key: %s", err)
				return nil, err
			}

			log.Info("AMQPS: Configured client certificate for AMQPS.")
			cfg.Certificates = append(cfg.Certificates, cert)
		}

		// If the configuration specified a CA certificate, make it the only
		// available root.
		if conf.AMQP.TLS.CACertFile != nil {
			cfg.RootCAs = x509.NewCertPool()

			ca, err := ioutil.ReadFile(*conf.AMQP.TLS.CACertFile)
			if err != nil {
				err = fmt.Errorf("AMQPS: Could not load CA Certificate: %s", err)
				return nil, err
			}
			cfg.RootCAs.AppendCertsFromPEM(ca)
			log.Info("AMQPS: Configured CA certificate for AMQPS.")
		}

		conn, err = amqp.DialTLS(conf.AMQP.Server, cfg)
	}

	if err != nil {
		return nil, err
	}

	err = rpc.AMQPDeclareExchange(conn)
	if err != nil {
		return nil, err
	}

	return conn.Channel()
}
Example #19
func (cmd) Execute(arguments map[string]interface{}) bool {
	// Read arguments
	imageFile := arguments["<image>"].(string)
	command := arguments["<command>"].([]string)
	vnc := arguments["--vnc"].(bool)

	// Create temporary storage and environment
	storage, err := runtime.NewTemporaryStorage(os.TempDir())
	if err != nil {
		panic("Failed to create TemporaryStorage")
	}
	environment := &runtime.Environment{
		TemporaryStorage: storage,
	}

	// Create a temporary folder
	tempFolder := filepath.Join("/tmp", slugid.Nice())
	if err = os.Mkdir(tempFolder, 0777); err != nil {
		log.Fatal("Failed to create temporary folder in /tmp, error: ", err)
	}

	// Create the necessary runtime setup
	gc := &gc.GarbageCollector{}
	logger, _ := runtime.CreateLogger("info")
	log := logger.WithField("component", "qemu-run")

	// Create image manager
	log.Info("Creating image manager")
	manager, err := image.NewManager(filepath.Join(tempFolder, "/images/"), gc, logger.WithField("component", "image-manager"), nil)
	if err != nil {
		log.Fatal("Failed to create image manager", err)
	}

	// Get an instance of the image
	log.Info("Creating instance of image")
	image, err := manager.Instance("image", func(target string) error {
		return cp.CopyFile(target, imageFile)
	})
	if err != nil {
		log.Fatal("Failed to create instance of image, error: ", err)
	}

	// Setup a user-space network
	log.Info("Creating user-space network")
	net, err := network.NewUserNetwork(tempFolder)
	if err != nil {
		log.Fatal("Failed to create user-space network, error: ", err)
	}

	// Create virtual machine
	log.Info("Creating virtual machine")
	vm, err := vm.NewVirtualMachine(
		image.Machine().Options(), image, net, tempFolder,
		"", "", logger.WithField("component", "vm"),
	)
	if err != nil {
		log.Fatal("Failed to create virtual-machine, error: ", err)
	}

	// Create meta-data service
	log.Info("Creating meta-data service")
	var shellServer *interactive.ShellServer
	var displayServer *interactive.DisplayServer
	ms := metaservice.New(command, make(map[string]string), os.Stdout, func(result bool) {
		fmt.Println("### Task Completed, result = ", result)
		shellServer.WaitAndClose()
		displayServer.Abort()
		vm.Kill()
	}, environment)

	// Setup http handler for network
	vm.SetHTTPHandler(ms)

	// Create ShellServer
	shellServer = interactive.NewShellServer(
		ms.ExecShell, log.WithField("component", "shell-server"),
	)

	// Create displayServer
	displayServer = interactive.NewDisplayServer(
		&socketDisplayProvider{socket: vm.VNCSocket()},
		log.WithField("component", "display-server"),
	)

	interactiveHandler := http.NewServeMux()
	interactiveHandler.Handle("/shell/", shellServer)
	interactiveHandler.Handle("/display/", displayServer)
	interactiveServer := graceful.Server{
		Timeout: 30 * time.Second,
		Server: &http.Server{
			Addr:    "localhost:8080",
			Handler: interactiveHandler,
		},
		NoSignalHandling: true,
	}
	go interactiveServer.ListenAndServe()

	// Start the virtual machine
	log.Info("Start the virtual machine")
	vm.Start()

	// Start vncviewer
	done := make(chan struct{})
	if vnc {
		go StartVNCViewer(vm.VNCSocket(), done)
	}

	// Wait for SIGINT/SIGKILL or vm.Done
	c := make(chan os.Signal, 2)
	signal.Notify(c, os.Interrupt, os.Kill) // This pattern leaks, acceptable here
	select {
	case <-c:
		signal.Stop(c)
		fmt.Println("### Terminating QEMU")
		vm.Kill()
	case <-vm.Done:
		fmt.Println("### QEMU terminated")
	}
	close(done)

	// Ensure that QEMU has terminated before we continue
	<-vm.Done
	interactiveServer.Stop(100 * time.Millisecond)

	// Clean up anything left in the garbage collector
	gc.CollectAll()
	return true
}
Example #20
func runDaemon(args *docopt.Args) {
	hostname, _ := os.Hostname()
	httpPort := args.String["--http-port"]
	externalIP := args.String["--external-ip"]
	listenIP := args.String["--listen-ip"]
	stateFile := args.String["--state"]
	hostID := args.String["--id"]
	tags := parseTagArgs(args.String["--tags"])
	force := args.Bool["--force"]
	volPath := args.String["--volpath"]
	volProvider := args.String["--vol-provider"]
	backendName := args.String["--backend"]
	flynnInit := args.String["--flynn-init"]
	logDir := args.String["--log-dir"]
	discoveryToken := args.String["--discovery"]
	bridgeName := args.String["--bridge-name"]

	logger, err := setupLogger(logDir)
	if err != nil {
		shutdown.Fatalf("error setting up logger: %s", err)
	}

	var peerIPs []string
	if args.String["--peer-ips"] != "" {
		peerIPs = strings.Split(args.String["--peer-ips"], ",")
	}

	if hostID == "" {
		hostID = strings.Replace(hostname, "-", "", -1)
	}

	var maxJobConcurrency uint64 = 4
	if m, err := strconv.ParseUint(args.String["--max-job-concurrency"], 10, 64); err == nil {
		maxJobConcurrency = m
	}

	var partitionCGroups = make(map[string]int64) // name -> cpu shares
	for _, p := range strings.Split(args.String["--partitions"], " ") {
		nameShares := strings.Split(p, "=cpu_shares:")
		if len(nameShares) != 2 {
			shutdown.Fatalf("invalid partition specifier: %q", p)
		}
		shares, err := strconv.ParseInt(nameShares[1], 10, 64)
		if err != nil || shares < 2 {
			shutdown.Fatalf("invalid cpu shares specifier: %q", shares)
		}
		partitionCGroups[nameShares[0]] = shares
	}
	for _, s := range []string{"user", "system", "background"} {
		if _, ok := partitionCGroups[s]; !ok {
			shutdown.Fatalf("missing mandatory resource partition: %s", s)
		}
	}

	log := logger.New("fn", "runDaemon", "host.id", hostID)
	log.Info("starting daemon")

	log.Info("validating host ID")
	if strings.Contains(hostID, "-") {
		shutdown.Fatal("host id must not contain dashes")
	}
	if externalIP == "" {
		log.Info("detecting external IP")
		var err error
		externalIP, err = config.DefaultExternalIP()
		if err != nil {
			log.Error("error detecting external IP", "err", err)
			shutdown.Fatal(err)
		}
		log.Info("using external IP " + externalIP)
	}

	publishAddr := net.JoinHostPort(externalIP, httpPort)
	if discoveryToken != "" {
		// TODO: retry
		log.Info("registering with cluster discovery service", "token", discoveryToken, "addr", publishAddr, "name", hostID)
		discoveryID, err := discovery.RegisterInstance(discovery.Info{
			ClusterURL:  discoveryToken,
			InstanceURL: "http://" + publishAddr,
			Name:        hostID,
		})
		if err != nil {
			log.Error("error registering with cluster discovery service", "err", err)
			shutdown.Fatal(err)
		}
		log.Info("registered with cluster discovery service", "id", discoveryID)
	}

	state := NewState(hostID, stateFile)
	shutdown.BeforeExit(func() { state.CloseDB() })

	log.Info("initializing volume manager", "provider", volProvider)
	var newVolProvider func() (volume.Provider, error)
	switch volProvider {
	case "zfs":
		newVolProvider = func() (volume.Provider, error) {
			// use a zpool backing file size of either 70% of the device on which
			// volumes will reside, or 100GB if that can't be determined.
			log.Info("determining ZFS zpool size")
			var size int64
			var dev syscall.Statfs_t
			if err := syscall.Statfs(volPath, &dev); err == nil {
				size = (dev.Bsize * int64(dev.Blocks) * 7) / 10
			} else {
				size = 100000000000
			}
			log.Info(fmt.Sprintf("using ZFS zpool size %d", size))

			return zfsVolume.NewProvider(&zfsVolume.ProviderConfig{
				DatasetName: "flynn-default",
				Make: &zfsVolume.MakeDev{
					BackingFilename: filepath.Join(volPath, "zfs/vdev/flynn-default-zpool.vdev"),
					Size:            size,
				},
				WorkingDir: filepath.Join(volPath, "zfs"),
			})
		}
	case "mock":
		newVolProvider = func() (volume.Provider, error) { return nil, nil }
	default:
		shutdown.Fatalf("unknown volume provider: %q", volProvider)
	}
	vman := volumemanager.New(
		filepath.Join(volPath, "volumes.bolt"),
		newVolProvider,
	)
	shutdown.BeforeExit(func() { vman.CloseDB() })

	mux := logmux.New(hostID, logDir, logger.New("host.id", hostID, "component", "logmux"))

	log.Info("initializing job backend", "type", backendName)
	var backend Backend
	switch backendName {
	case "libcontainer":
		backend, err = NewLibcontainerBackend(state, vman, bridgeName, flynnInit, mux, partitionCGroups, logger.New("host.id", hostID, "component", "backend", "backend", "libcontainer"))
	case "mock":
		backend = MockBackend{}
	default:
		shutdown.Fatalf("unknown backend %q", backendName)
	}
	if err != nil {
		shutdown.Fatal(err)
	}
	backend.SetDefaultEnv("EXTERNAL_IP", externalIP)
	backend.SetDefaultEnv("LISTEN_IP", listenIP)

	var buffers host.LogBuffers
	discoverdManager := NewDiscoverdManager(backend, mux, hostID, publishAddr, tags)
	publishURL := "http://" + publishAddr
	host := &Host{
		id:  hostID,
		url: publishURL,
		status: &host.HostStatus{
			ID:      hostID,
			PID:     os.Getpid(),
			URL:     publishURL,
			Tags:    tags,
			Version: version.String(),
		},
		state:   state,
		backend: backend,
		vman:    vman,
		discMan: discoverdManager,
		log:     logger.New("host.id", hostID),

		maxJobConcurrency: maxJobConcurrency,
	}
	backend.SetHost(host)

	// restore the host status if set in the environment
	if statusEnv := os.Getenv("FLYNN_HOST_STATUS"); statusEnv != "" {
		log.Info("restoring host status from parent")
		if err := json.Unmarshal([]byte(statusEnv), &host.status); err != nil {
			log.Error("error restoring host status from parent", "err", err)
			shutdown.Fatal(err)
		}
		pid := os.Getpid()
		log.Info("setting status PID", "pid", pid)
		host.status.PID = pid
		// keep the same tags as the parent
		discoverdManager.UpdateTags(host.status.Tags)
	}

	log.Info("creating HTTP listener")
	l, err := newHTTPListener(net.JoinHostPort(listenIP, httpPort))
	if err != nil {
		log.Error("error creating HTTP listener", "err", err)
		shutdown.Fatal(err)
	}
	host.listener = l
	shutdown.BeforeExit(func() { host.Close() })

	// if we have a control socket FD, wait for a "resume" message before
	// opening state DBs and serving requests.
	var controlFD int
	if fdEnv := os.Getenv("FLYNN_CONTROL_FD"); fdEnv != "" {
		log.Info("parsing control socket file descriptor")
		controlFD, err = strconv.Atoi(fdEnv)
		if err != nil {
			log.Error("error parsing control socket file descriptor", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("waiting for resume message from parent")
		msg := make([]byte, len(ControlMsgResume))
		if _, err := syscall.Read(controlFD, msg); err != nil {
			log.Error("error waiting for resume message from parent", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("validating resume message")
		if !bytes.Equal(msg, ControlMsgResume) {
			log.Error(fmt.Sprintf("unexpected resume message from parent: %v", msg))
			shutdown.ExitWithCode(1)
		}

		log.Info("receiving log buffers from parent")
		if err := json.NewDecoder(&controlSock{controlFD}).Decode(&buffers); err != nil {
			log.Error("error receiving log buffers from parent", "err", err)
			shutdown.Fatal(err)
		}
	}

	log.Info("opening state databases")
	if err := host.OpenDBs(); err != nil {
		log.Error("error opening state databases", "err", err)
		shutdown.Fatal(err)
	}

	// stopJobs stops all jobs, leaving discoverd until the end so other
	// jobs can unregister themselves on shutdown.
	stopJobs := func() (err error) {
		var except []string
		host.statusMtx.RLock()
		if host.status.Discoverd != nil && host.status.Discoverd.JobID != "" {
			except = []string{host.status.Discoverd.JobID}
		}
		host.statusMtx.RUnlock()
		log.Info("stopping all jobs except discoverd")
		if err := backend.Cleanup(except); err != nil {
			log.Error("error stopping all jobs except discoverd", "err", err)
			return err
		}
		for _, id := range except {
			log.Info("stopping discoverd")
			if e := backend.Stop(id); e != nil {
				log.Error("error stopping discoverd", "err", err)
				err = e
			}
		}
		return
	}

	log.Info("restoring state")
	resurrect, err := state.Restore(backend, buffers)
	if err != nil {
		log.Error("error restoring state", "err", err)
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() {
		// close discoverd before stopping jobs so we can unregister first
		log.Info("unregistering with service discovery")
		if err := discoverdManager.Close(); err != nil {
			log.Error("error unregistering with service discovery", "err", err)
		}
		stopJobs()
	})

	log.Info("serving HTTP requests")
	host.ServeHTTP()

	if controlFD > 0 {
		// now that we are serving requests, send an "ok" message to the parent
		log.Info("sending ok message to parent")
		if _, err := syscall.Write(controlFD, ControlMsgOK); err != nil {
			log.Error("error sending ok message to parent", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("closing control socket")
		if err := syscall.Close(controlFD); err != nil {
			log.Error("error closing control socket", "err", err)
		}
	}

	if force {
		log.Info("forcibly stopping existing jobs")
		if err := stopJobs(); err != nil {
			log.Error("error forcibly stopping existing jobs", "err", err)
			shutdown.Fatal(err)
		}
	}

	if discoveryToken != "" {
		log.Info("getting cluster peer IPs")
		instances, err := discovery.GetCluster(discoveryToken)
		if err != nil {
			// TODO(titanous): retry?
			log.Error("error getting discovery cluster", "err", err)
			shutdown.Fatal(err)
		}
		peerIPs = make([]string, 0, len(instances))
		for _, inst := range instances {
			u, err := url.Parse(inst.URL)
			if err != nil {
				continue
			}
			ip, _, err := net.SplitHostPort(u.Host)
			if err != nil || ip == externalIP {
				continue
			}
			peerIPs = append(peerIPs, ip)
		}
		log.Info("got cluster peer IPs", "peers", peerIPs)
	}
	log.Info("connecting to cluster peers")
	if err := discoverdManager.ConnectPeer(peerIPs); err != nil {
		log.Info("no cluster peers available")
	}

	if !args.Bool["--no-resurrect"] {
		log.Info("resurrecting jobs")
		resurrect()
	}

	monitor := NewMonitor(host.discMan, externalIP, logger)
	shutdown.BeforeExit(func() { monitor.Shutdown() })
	go monitor.Run()

	log.Info("blocking main goroutine")
	<-make(chan struct{})
}
Example #21
// StartService bootstraps the metadata service
func StartService(configFile, address, profileName, MFA string, port int, fake bool) {
	log := &ConsoleLogger{}
	config := Config{}

	// TODO: Move to function and use a default configuration file
	if configFile != "" {
		// Parse in options from the given config file.
		log.Debug("Loading configuration: %s\n", configFile)
		configContents, configErr := ioutil.ReadFile(configFile)
		if configErr != nil {
			log.Fatalf("Error reading config: %s\n", configErr.Error())
		}

		configParseErr := yaml.Unmarshal(configContents, &config)
		if configParseErr != nil {
			log.Fatalf("Error in parsing config file: %s\n", configParseErr.Error())
		}

		if len(config.Profiles) == 0 {
			log.Info("No profiles found, falling back to old config format.\n")
			configParseErr := yaml.Unmarshal(configContents, &config.Profiles)
			if configParseErr != nil {
				log.Fatalf("Error in parsing config file: %s\n", configParseErr.Error())
			}
			if len(config.Profiles) > 0 {
				log.Warning("WARNING: old deprecated config format is used.\n")
			}
		}
	} else {
		log.Debug("No configuration file given\n")
	}

	defer func() {
		log.Debug("Removing socket: %v\n", address)
		os.Remove(address)
	}()

	if port == 0 {
		port = config.Port
	}

	if port == 0 {
		port = 80
	}

	// Startup the HTTP server and respond to requests.
	listener, err := net.ListenTCP("tcp", &net.TCPAddr{
		IP:   net.ParseIP("169.254.169.254"),
		Port: port,
	})
	if err != nil {
		log.Fatalf("Failed to bind to socket: %s\n", err)
	}

	var credsManager CredentialsManager
	if fake {
		credsManager = &FakeCredentialsManager{}
	} else {
		credsManager = NewCredentialsExpirationManager(profileName, config, MFA)
	}

	log.Info("Starting web service: %v:%v\n", "169.254.169.254", port)
	mds, metadataError := NewMetadataService(listener, credsManager)
	if metadataError != nil {
		log.Fatalf("Failed to start metadata service: %s\n", metadataError.Error())
	}
	mds.Start()

	stop := make(chan struct{})
	agentServer := NewCliHandler(address, credsManager, stop, config)
	err = agentServer.Start()
	if err != nil {
		log.Fatalf("Failed to start agentServer: %s\n", err.Error())
	}

	// Wait for a graceful shutdown signal
	terminate := make(chan os.Signal, 1) // buffered: signal.Notify must not block sending
	signal.Notify(terminate, syscall.SIGINT, syscall.SIGTERM)

	log.Info("Service: online\n")
	defer log.Info("Caught signal: shutting down.\n")

	select {
	case <-stop:
	case <-terminate:
	}
}
Example #22
func babySit(init *ContainerInit, hbs []discoverd.Heartbeater) int {
	log := logger.New()

	var shutdownOnce sync.Once
	hbDone := make(chan struct{})
	closeHBs := func() {
		for _, hb := range hbs {
			if err := hb.Close(); err != nil {
				log.Error("error deregistering service", "addr", hb.Addr(), "err", err)
			} else {
				log.Info("service deregistered", "addr", hb.Addr())
			}
		}
		close(hbDone)
	}

	// Close the heartbeaters if requested to do so
	go func() {
		<-init.deregister
		log.Info("received deregister request")
		shutdownOnce.Do(closeHBs)
	}()

	// Forward all signals to the app
	sigchan := make(chan os.Signal, 1)
	sigutil.CatchAll(sigchan)
	go func() {
		for sig := range sigchan {
			log.Info("received signal", "type", sig)
			if sig == syscall.SIGCHLD {
				continue
			}
			if sig == syscall.SIGTERM || sig == syscall.SIGINT {
				shutdownOnce.Do(closeHBs)
			}
			log.Info("forwarding signal to job", "type", sig)
			init.process.Signal(sig)
		}
	}()

	// Wait for the app to exit.  Also, as pid 1 it's our job to reap all
	// orphaned zombies.
	var wstatus syscall.WaitStatus
	for {
		pid, err := syscall.Wait4(-1, &wstatus, 0, nil)
		if err == nil && pid == init.process.Pid {
			break
		}
	}

	// Ensure that the heartbeaters are closed even if the app wasn't signaled
	shutdownOnce.Do(closeHBs)
	select {
	case <-hbDone:
	case <-time.After(5 * time.Second):
		log.Error("timed out waiting for services to be deregistered")
	}

	if wstatus.Signaled() {
		log.Debug("job exited due to signal")
		return 0
	}

	return wstatus.ExitStatus()
}
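Three paths can trigger closeHBs above: a deregister request, a TERM/INT signal, and the fallthrough after the process exits, so it is guarded by sync.Once. A minimal sketch of that once-only shutdown pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var once sync.Once
	cleanup := func() { fmt.Println("deregistering services") }

	// Several shutdown paths may race to run cleanup; sync.Once
	// guarantees exactly one of them executes it.
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			once.Do(cleanup)
		}()
	}
	wg.Wait() // prints "deregistering services" once
}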
Example #23
// Run as pid 1 and monitor the contained process to return its exit code.
func containerInitApp(c *Config, logFile *os.File) error {
	log := logger.New()

	init := newContainerInit(c, logFile)
	log.Debug("registering RPC server")
	if err := rpcplus.Register(init); err != nil {
		log.Error("error registering RPC server", "err", err)
		return err
	}
	init.mtx.Lock()
	defer init.mtx.Unlock()

	// Prepare the cmd based on the given args
	// If this fails we report that below
	cmdPath, cmdErr := getCmdPath(c)
	cmd := exec.Command(cmdPath, c.Args[1:]...)
	cmd.Dir = c.WorkDir

	cmd.Env = make([]string, 0, len(c.Env))
	for k, v := range c.Env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}

	// App runs in its own session
	cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}

	if c.Uid != nil || c.Gid != nil {
		cmd.SysProcAttr.Credential = &syscall.Credential{}
		if c.Uid != nil {
			cmd.SysProcAttr.Credential.Uid = *c.Uid
		}
		if c.Gid != nil {
			cmd.SysProcAttr.Credential.Gid = *c.Gid
		}
	}

	// Console setup.  Hook up the container app's stdin/stdout/stderr to
	// either a pty or pipes.  The FDs for the controlling side of the
	// pty/pipes will be passed to flynn-host later via a UNIX socket.
	if c.TTY {
		log.Debug("creating PTY")
		ptyMaster, ptySlave, err := pty.Open()
		if err != nil {
			log.Error("error creating PTY", "err", err)
			return err
		}
		init.ptyMaster = ptyMaster
		cmd.Stdout = ptySlave
		cmd.Stderr = ptySlave
		if c.OpenStdin {
			log.Debug("attaching stdin to PTY")
			cmd.Stdin = ptySlave
			cmd.SysProcAttr.Setctty = true
		}
		if c.Uid != nil && c.Gid != nil {
			if err := syscall.Fchown(int(ptySlave.Fd()), int(*c.Uid), int(*c.Gid)); err != nil {
				log.Error("error changing PTY ownership", "err", err)
				return err
			}
		}
	} else {
		// We copy through a socketpair (rather than using cmd.StdoutPipe directly) to make
		// it easier for flynn-host to do non-blocking I/O (via net.FileConn) so that no
		// read(2) calls can succeed after closing the logs during an update.
		//
		// We also don't assign the socketpair directly to fd 1 because that prevents jobs
		// using /dev/stdout (calling open(2) on a socket leads to an ENXIO error, see
		// http://marc.info/?l=ast-users&m=120978595414993).
		newPipe := func(pipeFn func() (io.ReadCloser, error), name string) (*os.File, error) {
			pipe, err := pipeFn()
			if err != nil {
				return nil, err
			}
			if c.Uid != nil && c.Gid != nil {
				if err := syscall.Fchown(int(pipe.(*os.File).Fd()), int(*c.Uid), int(*c.Gid)); err != nil {
					return nil, err
				}
			}
			sockR, sockW, err := newSocketPair(name)
			if err != nil {
				return nil, err
			}
			go func() {
				defer sockW.Close()
				for {
					// copy data from the pipe to the socket using splice(2)
					// (rather than io.Copy) to avoid a needless copy through
					// user space
					n, err := syscall.Splice(int(pipe.(*os.File).Fd()), nil, int(sockW.Fd()), nil, 65535, 0)
					if err != nil || n == 0 {
						return
					}
				}
			}()
			return sockR, nil
		}

		log.Debug("creating stdout pipe")
		var err error
		init.stdout, err = newPipe(cmd.StdoutPipe, "stdout")
		if err != nil {
			log.Error("error creating stdout pipe", "err", err)
			return err
		}

		log.Debug("creating stderr pipe")
		init.stderr, err = newPipe(cmd.StderrPipe, "stderr")
		if err != nil {
			log.Error("error creating stderr pipe", "err", err)
			return err
		}

		if c.OpenStdin {
			// Can't use cmd.StdinPipe() here, since in Go 1.2 it
			// returns an io.WriteCloser with the underlying object
			// being an *exec.closeOnce, neither of which provides
			// a way to convert to an FD.
			log.Debug("creating stdin pipe")
			pipeRead, pipeWrite, err := os.Pipe()
			if err != nil {
				log.Error("creating stdin pipe", "err", err)
				return err
			}
			cmd.Stdin = pipeRead
			init.stdin = pipeWrite
		}
	}

	go runRPCServer()

	// Wait for flynn-host to tell us to start
	init.mtx.Unlock() // Allow calls
	log.Debug("waiting to be resumed")
	<-init.resume
	log.Debug("resuming")
	init.mtx.Lock()

	log.Info("starting the job", "args", cmd.Args)
	if cmdErr != nil {
		log.Error("error starting the job", "err", cmdErr)
		init.changeState(StateFailed, cmdErr.Error(), -1)
		init.exit(1)
	}
	if err := cmd.Start(); err != nil {
		log.Error("error starting the job", "err", err)
		init.changeState(StateFailed, err.Error(), -1)
		init.exit(1)
	}
	log.Debug("setting state to running")
	init.process = cmd.Process
	init.changeState(StateRunning, "", -1)

	init.mtx.Unlock() // Allow calls
	// monitor services
	hbs := make([]discoverd.Heartbeater, 0, len(c.Ports))
	for _, port := range c.Ports {
		if port.Service == nil {
			continue
		}
		log := log.New("name", port.Service.Name, "port", port.Port, "proto", port.Proto)
		log.Info("monitoring service")
		hb, err := monitor(port, init, c.Env, log)
		if err != nil {
			log.Error("error monitoring service", "err", err)
			os.Exit(70)
		}
		hbs = append(hbs, hb)
	}
	exitCode := babySit(init, hbs)
	log.Info("job exited", "status", exitCode)
	init.mtx.Lock()
	init.changeState(StateExited, "", exitCode)
	init.mtx.Unlock() // Allow calls

	log.Info("exiting")
	init.exit(exitCode)
	return nil
}
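Both this example and example 16 call a newSocketPair helper defined elsewhere in the project. A plausible reconstruction, assuming it simply wraps syscall.Socketpair (Linux) in two *os.File ends; the real helper may differ:

package main

import (
	"fmt"
	"os"
	"syscall"
)

// newSocketPair is a hypothetical reconstruction: a connected UNIX
// stream socket pair wrapped as two *os.File ends.
func newSocketPair(name string) (*os.File, *os.File, error) {
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
	if err != nil {
		return nil, nil, err
	}
	return os.NewFile(uintptr(fds[0]), name), os.NewFile(uintptr(fds[1]), name), nil
}

func main() {
	r, w, err := newSocketPair("stdout")
	if err != nil {
		panic(err)
	}
	defer r.Close()
	defer w.Close()
	w.Write([]byte("hello"))
	buf := make([]byte, 5)
	r.Read(buf)
	fmt.Println(string(buf)) // "hello"
}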