Example #1
0
// main parses command-line flags, sets up logging, connects to the etcd
// cluster, and starts the HTTP server via Run.
func main() {
	var port uint
	var etcdAddr, logLevel string

	flag.UintVarP(&port, "port", "p", 19000, "listen port")
	flag.StringVarP(&etcdAddr, "etcd", "e", defaultEtcdAddr, "address of etcd machine")
	flag.StringVarP(&logLevel, "log-level", "l", "warn", "log level")
	flag.Parse()

	if err := logx.DefaultSetup(logLevel); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"level": logLevel,
		}).Fatal("failed to set up logging")
	}

	etcdClient := etcd.NewClient([]string{etcdAddr})

	// SyncCluster doubles as a connectivity check before we continue.
	if !etcdClient.SyncCluster() {
		log.WithFields(log.Fields{
			"addr": etcdAddr,
		}).Fatal("unable to sync etcd cluster")
	}

	ctx := lochness.NewContext(etcdClient)

	// The error from Run was previously discarded with `_ =`; surface it so
	// server startup/runtime failures are visible instead of silent.
	if err := Run(port, ctx); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"port":  port,
		}).Fatal("failed to run server")
	}
}
Example #2
0
// main parses cli flags, configures logging, creates the OVS wrapper for the
// requested bridge, and serves its HTTP API on the given port.
func main() {
	// Handle cli flags
	var port uint
	var bridge, logLevel string
	flag.UintVarP(&port, "port", "p", 40001, "listen port")
	flag.StringVarP(&bridge, "bridge", "b", "mistify0", "bridge to join interfaces to with OVS")
	flag.StringVarP(&logLevel, "log-level", "l", "warning", "log level: debug/info/warning/error/fatal")
	flag.Parse()

	// Set up logging
	if err := logx.DefaultSetup(logLevel); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "logx.DefaultSetup",
		}).Fatal("Could not set up logging")
	}

	o, err := ovs.NewOVS(bridge)
	if err != nil {
		// Previously exited without logging the cause; record it first.
		log.WithFields(log.Fields{
			"error":  err,
			"func":   "ovs.NewOVS",
			"bridge": bridge,
		}).Error("Could not create OVS client")
		os.Exit(1)
	}
	// Run HTTP Server
	if err := o.RunHTTP(port); err != nil {
		// Previously exited without logging the cause; record it first.
		log.WithFields(log.Fields{
			"error": err,
			"func":  "ovs.RunHTTP",
			"port":  port,
		}).Error("Could not run HTTP server")
		os.Exit(1)
	}
}
Example #3
0
// main wires up the img command-line interface and dispatches to the
// subcommand handlers (list, fetch, upload, download, delete).
func main() {
	// Logging must be usable before anything else happens.
	if err := logx.DefaultSetup("error"); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "logx.DefaultSetup",
			"level": "error",
		}).Fatal("unable to set up logrus")
	}

	// Root command; persistent flags apply to every subcommand.
	root := &cobra.Command{
		Use:  "img",
		Long: "img is the command line interface to mistify-image-service. All commands support arguments via command line or stdin.",
		Run:  help,
	}
	root.PersistentFlags().BoolVarP(&jsonout, "json", "j", jsonout, "output in json")
	root.PersistentFlags().StringVarP(&server, "server", "s", server, "server address to connect to")

	listCmd := &cobra.Command{
		Use:   "list [<id>...]",
		Short: "List the images",
		Run:   list,
	}

	fetchCmd := &cobra.Command{
		Use:   "fetch <spec>...",
		Short: "Fetch the image(s)",
		Long:  `Fetch new image(s) from a remote source. Where "spec" is a valid image metadata json string.`,
		Run:   fetch,
	}

	uploadCmd := &cobra.Command{
		Use:   "upload <spec>...",
		Short: "Upload the image(s)",
		Long:  `Upload new image(s) from a local source. Where "spec" is a valid image metadata json string.`,
		Run:   upload,
	}

	// download takes an extra flag for the destination directory.
	downloadCmd := &cobra.Command{
		Use:   "download <id>...",
		Short: "Download the image(s)",
		Run:   download,
	}
	downloadCmd.Flags().StringVarP(&downloadDir, "download-dir", "d", downloadDir, "directory to put downloaded image(s)")

	deleteCmd := &cobra.Command{
		Use:   "delete <id>...",
		Short: "Delete images",
		Run:   del,
	}

	// AddCommand is variadic; register all subcommands in one call.
	root.AddCommand(listCmd, fetchCmd, uploadCmd, downloadCmd, deleteCmd)

	if err := root.Execute(); err != nil {
		log.WithField("error", err).Fatal("failed to execute root command")
	}
}
Example #4
0
// main runs the hypervisor heartbeat loop: it resolves/sets this hypervisor's
// ID, then periodically refreshes its resources and heartbeats into etcd.
func main() {
	interval := flag.IntP("interval", "i", 60, "update interval in seconds")
	ttl := flag.IntP("ttl", "t", 0, "heartbeat ttl in seconds")
	eaddr := flag.StringP("etcd", "e", "http://localhost:4001", "address of etcd machine")
	id := flag.StringP("id", "d", "", "hypervisor id")
	logLevel := flag.StringP("log-level", "l", "info", "log level")
	flag.Parse()

	if err := logx.DefaultSetup(*logLevel); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "logx.DefaultSetup",
			// dereferenced: previously logged the *string pointer, not the value
			"level": *logLevel,
		}).Fatal("failed to set up logging")
	}

	// Default the ttl to twice the update interval.
	if *ttl == 0 {
		*ttl = 2 * (*interval)
	}

	e := etcd.NewClient([]string{*eaddr})
	c := lochness.NewContext(e)

	hn, err := lochness.SetHypervisorID(*id)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "lochness.SetHypervisorID",
			// dereferenced: previously logged the *string pointer, not the value
			"id": *id,
		}).Fatal("failed to set hypervisor id")
	}

	hv, err := c.Hypervisor(hn)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "context.Hypervisor",
			"id":    hn,
		}).Fatal("failed to instantiate hypervisor")
	}

	for {
		if err = hv.UpdateResources(); err != nil {
			log.WithFields(log.Fields{
				"error": err,
				"func":  "hv.UpdateResources",
			}).Fatal("failed to update hypervisor resources")
		}
		// NOTE(review): time.Duration(*ttl) is *ttl NANOseconds; if Heartbeat
		// expects a ttl in seconds this should be
		// time.Duration(*ttl)*time.Second — confirm against the Heartbeat API.
		if err = hv.Heartbeat(time.Duration(*ttl)); err != nil {
			log.WithFields(log.Fields{
				"error": err,
				"func":  "hv.Heartbeat",
				"ttl":   *ttl,
			}).Fatal("failed to beat heart")
		}
		time.Sleep(time.Duration(*interval) * time.Second)
	}
}
Example #5
0
// main connects to etcd and beanstalkd, sets up metrics, and starts consuming
// guest jobs from the work queue, dispatching them to the mistify agent.
func main() {
	var port uint
	var etcdAddr, bstalk, logLevel string

	// Command line flags
	flag.StringVarP(&bstalk, "beanstalk", "b", "127.0.0.1:11300", "address of beanstalkd server")
	flag.StringVarP(&logLevel, "log-level", "l", "warn", "log level")
	flag.StringVarP(&etcdAddr, "etcd", "e", "http://127.0.0.1:4001", "address of etcd server")
	flag.UintVarP(&port, "http", "p", 7544, "http port to publish metrics. set to 0 to disable")
	flag.Parse()

	// Set up logger
	if err := logx.DefaultSetup(logLevel); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"level": logLevel,
		}).Fatal("unable to set up logrus") // message fixed: was "unable to to set up"
	}

	etcdClient := etcd.NewClient([]string{etcdAddr})

	// SyncCluster doubles as a connectivity check before we continue.
	if !etcdClient.SyncCluster() {
		log.WithFields(log.Fields{
			"addr": etcdAddr,
		}).Fatal("unable to sync etcd cluster")
	}

	ctx := lochness.NewContext(etcdClient)

	log.WithField("address", bstalk).Info("connection to beanstalk")
	jobQueue, err := jobqueue.NewClient(bstalk, etcdClient)
	if err != nil {
		log.WithFields(log.Fields{
			"error":   err,
			"address": bstalk,
		}).Fatal("failed to create jobQueue client")
	}

	// Set up metrics. m may be nil when metrics are disabled (port 0);
	// consume handles that. (A dead, empty `if m != nil {}` was removed.)
	m := setupMetrics(port)

	agent := ctx.NewMistifyAgent()

	// Start consuming
	consume(jobQueue, agent, m)
}
Example #6
0
// main runs the cdhcpd integration test: it spins up etcd and cdhcpd, walks
// through hypervisor/guest creation steps, and checks that the generated
// dhcpd config files change (or don't) at each step, collecting results into
// a report buffer.
func main() {

	// Command line options
	var logLevel string
	flag.StringVarP(&logLevel, "log-level", "l", "info", "log level: debug/info/warning/error/critical/fatal")
	flag.Parse()

	// Logging
	if err := logx.DefaultSetup(logLevel); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "logx.DefaultSetup",
		}).Fatal("Could not set up logrus")
	}

	// Write logs to this directory
	testDir = "cdhcpd-integration-test-" + uuid.New()
	if err := os.Mkdir(testDir, 0755); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "os.Mkdir",
			"path":  testDir,
		}).Fatal("Could not create directory for test logs")
	}
	hconfPath = testDir + "/hypervisors.conf"
	gconfPath = testDir + "/guests.conf"

	// From now on, write the logs from this script to a file in the test directory as well
	var err error
	selfLog, err = os.Create(testDir + "/integrationtest.log")
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "os.Create", // fixed: previously mislabeled "os.Open"
		}).Fatal("Could not open self-log file for writing")
	}
	defer func() {
		if err := selfLog.Sync(); err != nil {
			fmt.Println("Could not sync self-log file")
		}
		if err := selfLog.Close(); err != nil {
			fmt.Println("Could not close self-log file")
			os.Exit(1)
		}
	}()
	log.SetOutput(selfLog)

	// Set up report and global ok
	r := &bytes.Buffer{}
	_, _ = r.WriteString("cdhcpd Integration Test Results\n")
	_, _ = r.WriteString("==================================\n")
	testOk = true

	// Start up processes
	log.Info("Starting etcd")
	ep := newTestProcess("etcd", exec.Command("etcd", "--data-dir", testDir+"/data.etcd",
		"--listen-client-urls", etcdClientAddress,
		"--listen-peer-urls", etcdPeerAddress,
		"--initial-advertise-peer-urls", etcdPeerAddress,
		"--initial-cluster", "default="+etcdPeerAddress,
		"--advertise-client-urls", etcdClientAddress,
	))
	if err := ep.captureOutput(true); err != nil {
		cleanupAfterError(err, "testProcess.captureOutput", r, nil, ep, nil)
	}
	if err := ep.start(); err != nil {
		cleanupAfterError(err, "testProcess.start", r, nil, ep, nil)
	}
	log.Info("Starting cdhcpd")
	dp := newTestProcess("cdhcpd", exec.Command("cdhcpd", "-e", etcdClientAddress,
		"-d", "example.com",
		"-l", logLevel,
		"-c", testDir,
	))
	if err := dp.captureOutput(true); err != nil {
		if err := ep.finish(); err != nil {
			log.Error("Could not close out etcd")
		}
		cleanupAfterError(err, "testProcess.captureOutput", r, nil, ep, dp)
	}
	if err := dp.start(); err != nil {
		if err := ep.finish(); err != nil {
			log.Error("Could not close out etcd")
		}
		cleanupAfterError(err, "testProcess.start", r, nil, ep, dp)
	}

	// Begin test
	log.Info("Running test")
	time.Sleep(time.Second)
	if ok := reportConfStatus(r, "on start", "not present", "not present"); !ok {
		log.Warning("Failure testing conf status on start")
		testOk = false
	}

	// Set up context
	e := etcd.NewClient([]string{etcdClientAddress})
	c := lochness.NewContext(e)

	// Roughly follow the steps of the demo's guest-setup.sh
	hs := make(map[string]*lochness.Hypervisor)
	gs := make(map[string]*lochness.Guest)

	// Basic setup
	f, err := testhelper.NewFlavor(c, 1, 512, 1024)
	if err != nil {
		cleanupAfterError(err, "testhelper.NewFlavor", r, e, ep, dp)
	}
	n, err := testhelper.NewNetwork(c)
	if err != nil {
		cleanupAfterError(err, "testhelper.NewNetwork", r, e, ep, dp)
	}
	fw, err := testhelper.NewFirewallGroup(c)
	if err != nil {
		cleanupAfterError(err, "testhelper.NewFirewallGroup", r, e, ep, dp)
	}
	time.Sleep(time.Second)
	if ok := reportConfStatus(r, "after setup", "created", "created"); !ok {
		log.Warning("Failure testing conf status after setup")
		testOk = false
	}

	// Add a subnet
	s, err := testhelper.NewSubnet(c, "192.168.100.0/24", net.IPv4(192, 168, 100, 1), net.IPv4(192, 168, 100, 10), net.IPv4(192, 168, 100, 200), n)
	if err != nil {
		cleanupAfterError(err, "testhelper.NewSubnet", r, e, ep, dp)
	}
	time.Sleep(time.Second)
	if ok := reportConfStatus(r, "after subnet creation", "not touched", "not touched"); !ok {
		log.Warning("Failure testing conf status after subnet creation")
		testOk = false
	}

	// Add a hypervisor
	h, err := testhelper.NewHypervisor(c, "fe:dc:ba:98:76:54", net.IPv4(192, 168, 100, 200), net.IPv4(192, 168, 100, 1), net.IPv4(255, 255, 255, 0), "br0", s)
	if err != nil {
		cleanupAfterError(err, "testhelper.NewHypervisor", r, e, ep, dp)
	}
	hs[h.ID] = h
	time.Sleep(time.Second)
	if ok := reportConfStatus(r, "after hypervisor creation", "changed", "not touched"); !ok {
		log.Warning("Failure testing conf status after hypervisor creation")
		testOk = false
	}
	if ok := reportHasHosts(r, "after hypervisor creation", hs, gs); !ok {
		log.Warning("Failure testing for hosts in confs after hypervisor creation")
		testOk = false
	}

	// Create a guest, without setting the subnet or hypervisor (will NOT appear in the conf)
	g, err := testhelper.NewGuest(c, "A4:75:C1:6B:E3:49", n, nil, f, fw, nil)
	if err != nil {
		cleanupAfterError(err, "testhelper.NewGuest", r, e, ep, dp)
	}
	time.Sleep(time.Second)
	if ok := reportConfStatus(r, "after guest creation", "not touched", "not touched"); !ok {
		log.Warning("Failure testing conf status after guest creation")
		testOk = false
	}
	if ok := reportHasHosts(r, "after guest creation", hs, gs); !ok {
		log.Warning("Failure testing for hosts in confs after guest creation")
		testOk = false
	}

	// Add the guest to the hypervisor (would normally be performed by cplacerd, having pulled it from the queue)
	if err := h.AddGuest(g); err != nil {
		cleanupAfterError(err, "hypervisor.AddGuest", r, e, ep, dp)
	}
	gs[g.ID] = g
	time.Sleep(time.Second)
	if ok := reportConfStatus(r, "after adding guest to hypervisor", "not touched", "changed"); !ok {
		log.Warning("Failure testing conf status after adding guest to hypervisor")
		testOk = false
	}
	if ok := reportHasHosts(r, "after adding guest to hypervisor", hs, gs); !ok {
		log.Warning("Failure testing for hosts in confs after adding guest to hypervisor")
		testOk = false
	}

	// Sleep for a few seconds to make sure everything finished, then clean up
	time.Sleep(2 * time.Second)
	log.WithField("path", testDir).Info("Creating test output directory")
	showTestStatus(true)
	cleanup(r, e, ep, dp)
}
Example #7
0
// main watches etcd for hypervisor/guest/subnet changes and regenerates the
// dhcpd configuration files, restarting dhcpd whenever they change.
func main() {

	// Command line options
	var etcdAddress, domain, confPath, logLevel string
	flag.StringVarP(&domain, "domain", "d", "", "domain for lochness; required")
	flag.StringVarP(&etcdAddress, "etcd", "e", "http://127.0.0.1:4001", "address of etcd server")
	flag.StringVarP(&confPath, "conf-dir", "c", "/etc/dhcp/", "dhcpd configuration directory")
	flag.StringVarP(&logLevel, "log-level", "l", "warning", "log level: debug/info/warning/error/critical/fatal")
	flag.Parse()

	// Domain is required
	if domain == "" {
		flag.PrintDefaults()
		os.Exit(1)
	}

	// Logging
	if err := logx.DefaultSetup(logLevel); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "logx.DefaultSetup",
		}).Fatal("Could not set up logrus")
	}

	hconfPath := path.Join(confPath, "hypervisors.conf")
	gconfPath := path.Join(confPath, "guests.conf")

	// Set up fetcher and refresher
	f := NewFetcher(etcdAddress)
	r := NewRefresher(domain)
	err := f.FetchAll()
	if err != nil {
		os.Exit(1)
	}

	// Update at the start of each run
	restart, err := updateConfigs(f, r, hconfPath, gconfPath)
	if restart {
		restartDhcpd()
	}
	if err != nil {
		os.Exit(1)
	}

	// Create the watcher
	w, err := watcher.New(f.etcdClient)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "watcher.New",
		}).Fatal("Could not create watcher")
	}

	// Start watching the necessary etcd prefixs
	prefixes := [...]string{"/lochness/hypervisors", "/lochness/guests", "/lochness/subnets"}
	for _, prefix := range prefixes {
		if err := w.Add(prefix); err != nil {
			log.WithFields(log.Fields{
				"error":  err,
				"func":   "watcher.Add",
				"prefix": prefix,
			}).Fatal("Could not add watch prefix")
		}
	}

	// Channel for indicating work in progress
	// (to coordinate clean exiting between the consumer and the signal handler)
	ready := make(chan struct{}, 1)
	ready <- struct{}{}

	// Handle signals for clean shutdown. This must be installed BEFORE the
	// watch loop: previously it sat after the loop, where it was unreachable
	// while the watcher was running, so SIGINT/SIGTERM never triggered a
	// clean shutdown.
	sigs := make(chan os.Signal, 1) // buffered, as signal.Notify requires
	signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)
	go func() {
		s := <-sigs
		log.WithField("signal", s).Info("Signal received; waiting for current task to process")
		<-ready // wait until any current processing is finished
		_ = w.Close()
		log.Info("Exiting")
		os.Exit(0)
	}()

	for w.Next() {
		// Remove item to indicate processing has begun
		done := <-ready

		// Integrate the response and update the configs if necessary
		refresh, err := f.IntegrateResponse(w.Response())
		if err != nil {
			log.Info("Error on integration; re-fetching")
			err := f.FetchAll()
			if err != nil {
				os.Exit(1)
			}
			refresh = true
		}
		if refresh {
			restart, err := updateConfigs(f, r, hconfPath, gconfPath)
			if restart {
				restartDhcpd()
			}
			if err != nil {
				log.WithFields(log.Fields{
					"error": err,
					"func":  "updateConfigs",
				}).Warn("Could not update configs") // message fixed: was a copy-pasted "Could not create watcher"
			}
		}

		// Return item to indicate processing has completed
		ready <- done
	}
	if err := w.Err(); err != nil {
		log.WithField("error", err).Fatal("Watcher encountered an error")
	}
}
Example #8
0
// main watches configured etcd prefixes and runs ansible when they change.
// The etcd address defaults to eaddress, can be overridden by the
// NCONFIGD_ETCD_ADDRESS environment variable, and a command-line flag beats
// both.
func main() {
	// environment can only override default address
	eaddr := os.Getenv("NCONFIGD_ETCD_ADDRESS")
	if eaddr == "" {
		eaddr = eaddress
	}

	logLevel := flag.StringP("log-level", "l", "warn", "log level")
	flag.StringVarP(&ansibleDir, "ansible", "a", ansibleDir, "directory containing the ansible run command")
	flag.StringP("etcd", "e", eaddress, "address of etcd server")
	configPath := flag.StringP("config", "c", "", "path to config file with prefixs")
	once := flag.BoolP("once", "o", false, "run only once and then exit")
	flag.Parse()
	// an explicitly set --etcd flag takes precedence over the environment
	flag.Visit(func(f *flag.Flag) {
		if f.Name == "etcd" {
			eaddr = f.Value.String()
		}
	})

	// Set up logging
	if err := logx.DefaultSetup(*logLevel); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "logx.DefaultSetup",
			"level": *logLevel,
		}).Fatal("failed to set up logging")
	}

	// Load config containing prefixs to watch
	config, err := loadConfig(*configPath)
	if err != nil {
		log.WithFields(log.Fields{
			"error":      err,
			"configPath": *configPath,
		}).Fatal("failed to load config")
	}

	log.WithField("config", config).Info("config loaded")

	// set up etcd connection
	log.WithField("address", eaddr).Info("connection to etcd")
	etcdClient := etcd.NewClient([]string{eaddr})
	// make sure we can actually connect to etcd.
	// SyncCluster only reports success/failure; there is no error value here
	// (the previously-logged "error": err was always nil at this point).
	if !etcdClient.SyncCluster() {
		log.WithFields(log.Fields{
			"address": eaddr,
		}).Fatal("failed to connect to etcd cluster")
	}

	// always run initially
	runAnsible(config, eaddr, "")
	if *once {
		return
	}

	// set up watcher
	w := watchKeys(config, etcdClient)

	// to coordinate clean exiting between the consumer and the signal handler
	ready := make(chan struct{}, 1)
	ready <- struct{}{}

	// handle events
	go consumeResponses(config, eaddr, w, ready)

	// handle signals for clean shutdown
	sigs := make(chan os.Signal, 1) // buffered, as signal.Notify requires
	signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)

	s := <-sigs
	log.WithField("signal", s).Info("signal received. waiting for current task to process")
	// wait until any current processing is finished
	<-ready
	_ = w.Close()
	log.Info("exiting")
}
Example #9
0
// main is the cplacerd entry point: it connects to etcd and beanstalkd,
// optionally serves metrics over HTTP, and then loops pulling create tasks
// from the job queue, running each through a fixed pipeline of TaskFuncs.
func main() {
	var port uint
	var etcdAddr, bstalk, logLevel string

	flag.StringVarP(&bstalk, "beanstalk", "b", "127.0.0.1:11300", "address of beanstalkd server")
	flag.StringVarP(&logLevel, "log-level", "l", "warn", "log level")
	flag.StringVarP(&etcdAddr, "etcd", "e", "http://127.0.0.1:4001", "address of etcd server")
	flag.UintVarP(&port, "http", "p", 7543, "address for http interface. set to 0 to disable")
	flag.Parse()

	// Set up logger
	if err := logx.DefaultSetup(logLevel); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"level": logLevel,
		}).Fatal("failed to set up logging")
	}

	etcdClient := etcd.NewClient([]string{etcdAddr})

	if !etcdClient.SyncCluster() {
		log.WithFields(log.Fields{
			"addr": etcdAddr,
		}).Fatal("unable to sync etcd cluster")
	}

	log.WithField("address", bstalk).Info("connection to beanstalk")
	jobQueue, err := jobqueue.NewClient(bstalk, etcdClient)
	if err != nil {
		log.WithFields(log.Fields{
			"error":   err,
			"address": bstalk,
		}).Fatal("failed to create jobQueue client")
	}

	// setup metrics
	ms := mapsink.New()
	conf := metrics.DefaultConfig("cplacerd")
	conf.EnableHostname = false
	m, _ := metrics.New(conf, ms)

	if port != 0 {

		http.Handle("/metrics", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			// Headers must be set BEFORE WriteHeader; previously Content-Type
			// was set after and therefore never sent.
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusOK)
			if err := json.NewEncoder(w).Encode(ms); err != nil {
				log.WithField("error", err).Error(err)
			}
		}))

		go func() {
			if err := http.ListenAndServe(fmt.Sprintf(":%d", port), nil); err != nil {
				log.WithFields(log.Fields{
					"error": err,
				}).Fatal("error serving")
			}
		}()

	}

	// XXX: we want to try to keep track of where a job is
	// in this pipeline? would have to persist in the job
	funcs := []TaskFunc{
		TaskFunc{
			name:     "check job status",
			function: checkJobStatus,
		},
		TaskFunc{
			name:     "check guest status",
			function: checkGuestStatus,
		},
		TaskFunc{
			name:     "select hypervisor candidate",
			function: selectHypervisor,
		},
		TaskFunc{
			name:     "update job action",
			function: changeJobAction,
		},
		TaskFunc{
			name:     "add task to worker",
			function: addJobToWorker,
		},
		TaskFunc{
			name:     "make task for deletion",
			function: deleteTask,
		},
	}

	// Derive metric labels from the function names. Index into the slice:
	// the previous `for _, f := range funcs` assigned to a copy, leaving
	// every label empty.
	for i := range funcs {
		funcs[i].label = strings.Split(runtime.FuncForPC(reflect.ValueOf(funcs[i].function).Pointer()).Name(), ".")[1]
	}

	for {
		task, err := jobQueue.NextCreateTask()
		if err != nil {
			if bCE, ok := err.(beanstalk.ConnError); ok {
				switch bCE {
				case beanstalk.ErrTimeout:
					// Empty queue, continue waiting
					continue
				case beanstalk.ErrDeadline:
					// See docs on beanstalkd deadline
					// We're just going to sleep to let the deadline'd job expire
					// and try to get another job
					m.IncrCounter([]string{"beanstalk", "error", "deadline"}, 1)
					log.Debug(beanstalk.ErrDeadline)
					time.Sleep(5 * time.Second)
					continue
				default:
					// You have failed me for the last time
					log.WithField("error", err).Fatal(err)
				}
			}
			log.WithFields(log.Fields{
				"task":  task,
				"error": err,
			}).Error("invalid task")

			// task may be nil on error; guard before deleting, and do not
			// fall through to process an invalid task (previously execution
			// continued into the pipeline below).
			if task != nil {
				if err := task.Delete(); err != nil {
					log.WithFields(log.Fields{
						"task":  task.ID,
						"error": err,
					}).Error("unable to delete")
				}
			}
			continue
		}

		for _, f := range funcs {

			fields := log.Fields{
				"task": task,
			}

			log.WithFields(fields).Debug("running")

			start := time.Now()
			rm, err := f.function(jobQueue, task)

			m.MeasureSince([]string{f.label, "time"}, start)
			m.IncrCounter([]string{f.label, "count"}, 1)

			duration := int(time.Since(start).Seconds() * 1000)
			log.WithFields(fields).WithField("duration", duration).Info("done")

			if err != nil {

				m.IncrCounter([]string{f.label, "error"}, 1)

				log.WithFields(fields).WithField("error", err).Error("task error")

				task.Job.Status = jobqueue.JobStatusError
				task.Job.Error = err.Error()
				if err := task.Job.Save(24 * time.Hour); err != nil {
					log.WithFields(log.Fields{
						"task":  task,
						"error": err,
					}).Error("unable to save")
				}
			}

			if rm {
				if err := task.Delete(); err != nil {
					log.WithFields(log.Fields{
						"task":  task.ID,
						"error": err,
					}).Error("unable to delete")
				}
				break
			}
		}
	}
}
Example #10
0
// main is the cguestd entry point: it connects to etcd and beanstalkd, wires
// up metrics (optionally fanned out to statsd), and runs the HTTP server.
func main() {
	var port uint
	var etcdAddr, bstalk, logLevel, statsd string

	flag.UintVarP(&port, "port", "p", 18000, "listen port")
	flag.StringVarP(&etcdAddr, "etcd", "e", defaultEtcdAddr, "address of etcd machine")
	flag.StringVarP(&bstalk, "beanstalk", "b", "127.0.0.1:11300", "address of beanstalkd server")
	flag.StringVarP(&logLevel, "log-level", "l", "warn", "log level")
	flag.StringVarP(&statsd, "statsd", "s", "", "statsd address")
	flag.Parse()

	if err := logx.DefaultSetup(logLevel); err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"func":  "logx.DefaultSetup",
			"level": logLevel,
		}).Fatal("unable to set up logrus")
	}

	etcdClient := etcd.NewClient([]string{etcdAddr})

	// SyncCluster reports only success/failure; there is no error value to
	// log (the old code logged a literal nil "error" field).
	if !etcdClient.SyncCluster() {
		log.WithFields(log.Fields{
			"func": "etcd.SyncCluster",
			"addr": etcdAddr,
		}).Fatal("unable to sync etcd cluster")
	}

	ctx := lochness.NewContext(etcdClient)

	log.WithField("address", bstalk).Info("connection to beanstalk")
	jobQueue, err := jobqueue.NewClient(bstalk, etcdClient)
	if err != nil {
		log.WithFields(log.Fields{
			"error":   err,
			"address": bstalk,
		}).Fatal("failed to create jobQueue client")
	}

	// setup metrics
	sink := mapsink.New()
	fanout := metrics.FanoutSink{sink}

	if statsd != "" {
		ss, _ := metrics.NewStatsdSink(statsd)
		fanout = append(fanout, ss)
	}
	conf := metrics.DefaultConfig("cguestd")
	conf.EnableHostname = false
	m, _ := metrics.New(conf, fanout)

	mctx := &metricsContext{
		sink:    sink,
		metrics: m,
		mmw:     mmw.New(m),
	}

	if err := Run(port, ctx, jobQueue, mctx); err != nil {
		log.WithFields(log.Fields{
			"error": err,
		}).Fatal("failed to run server")
	}
}