Example #1
// loadConfiguration loads the application configuration from environment variables
func loadConfiguration(app *AppConfig, rdis *RedisConfig, nats *NatsConfig) {
	err := envconfig.Process(ServiceName, app)
	if err != nil {
		log.Panicln(err)
	}
	err = envconfig.Process("redis", rdis)
	if err != nil {
		log.Panicln(err)
	}
	err = envconfig.Process("nats", nats)
	if err != nil {
		log.Panicln(err)
	}
	if len(os.Getenv(KeyLogly)) > 0 {
		log.Printf("Loading logly token %s \n", os.Getenv(KeyLogly))
		hook := logrusly.NewLogglyHook(os.Getenv(KeyLogly),
			app.Host,
			log.InfoLevel,
			app.Name)
		log.AddHook(hook)
	}

	log.Println("##### LOADED CONFIG #####")
	log.Printf("REDIS_URI: %s \n", rdis.URI)
	log.Printf("NATS_ENDPOINT: %s \n", nats.Endpoint)
}
Example #2
File: tui.go Project: Rompei/vuls
// RunTui executes the main TUI logic
func RunTui(historyID string) subcommands.ExitStatus {
	var err error
	scanHistory, err = selectScanHistory(historyID)
	if err != nil {
		log.Fatal(err)
		return subcommands.ExitFailure
	}

	g := gocui.NewGui()
	if err := g.Init(); err != nil {
		log.Panicln(err)
	}
	defer g.Close()

	g.SetLayout(layout)
	if err := keybindings(g); err != nil {
		log.Panicln(err)
	}
	g.SelBgColor = gocui.ColorGreen
	g.SelFgColor = gocui.ColorBlack
	g.Cursor = true

	if err := g.MainLoop(); err != nil && err != gocui.ErrQuit {
		log.Panicln(err)
		return subcommands.ExitFailure
	}

	return subcommands.ExitSuccess
}
Example #3
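// readConfig reads and parses the YAML configuration at filepath, panicking if the file cannot be read or unmarshalled.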
func readConfig(filepath string) *Config {
	bytes, err := ioutil.ReadFile(filepath)
	if err != nil {
		log.Panicln("Could not read crucial config file for init", err)
	}

	config := &Config{}

	err = yaml.Unmarshal(bytes, config)
	if err != nil {
		log.Panicln("Could not parse config file!", err)
	}

	return config
}
Example #4
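// configGet prints the configuration value named by the first CLI argument, rendering lists and maps as YAML.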
func configGet(c *cli.Context) {
	arg := c.Args().Get(0)
	if arg == "" {
		return
	}

	cfg, err := config.LoadConfig()
	if err != nil {
		log.Panicln(err)
	}

	val, err := cfg.Get(arg)
	if err != nil {
		log.WithFields(log.Fields{"cfg": cfg, "arg": arg, "val": val}).Panicln(err)
	}

	printYaml := false
	switch val.(type) {
	case []interface{}:
		printYaml = true
	case map[interface{}]interface{}:
		printYaml = true
	}

	if printYaml {
		bytes, err := yaml.Marshal(val)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(bytes))
	} else {
		fmt.Println(val)
	}
}
Example #5
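// check panics with e when it is non-nil, logging the optional message first.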
func check(e error, message string) {
	if e != nil {
		if message != "" {
			log.Println(message)
		}
		log.Panicln(e)
	}
}
Example #6
func TestDefaultFormatterPanic(t *testing.T) {
	defaultLog.init()
	defer func() {
		if r := recover(); r == nil {
			t.Errorf("Expected panic but did not")
		}
	}()
	logrus.Panicln("Panic Test")
}
Example #7
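// Run initializes the gocui interface, registers keybindings, connects to the entry server in the background, and blocks in the GUI main loop.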
func (gc *GuiClient) Run() {
	gui := gocui.NewGui()
	if err := gui.Init(); err != nil {
		log.Panicln(err)
	}
	defer gui.Close()
	gc.gui = gui

	if err := gui.SetKeybinding("", gocui.KeyCtrlC, gocui.ModNone, quit); err != nil {
		log.Panicln(err)
	}
	if err := gui.SetKeybinding("input", gocui.KeyEnter, gocui.ModNone, gc.readLine); err != nil {
		log.Panicln(err)
	}
	gui.ShowCursor = true
	gui.BgColor = gocui.ColorDefault
	gui.FgColor = gocui.ColorDefault
	gui.SetLayout(gc.layout)

	gc.conversations = make(map[string]*Conversation)
	gc.switchConversation(gc.myName)

	gc.dialer = &Dialer{
		gui:          gc,
		pki:          gc.pki,
		myPublicKey:  gc.myPublicKey,
		myPrivateKey: gc.myPrivateKey,
	}
	gc.dialer.Init()

	go func() {
		time.Sleep(500 * time.Millisecond)
		if err := gc.Connect(); err != nil {
			gc.Warnf("Failed to connect: %s\n", err)
		}
		gc.Warnf("Connected: %s\n", gc.pki.EntryServer)
	}()

	err := gui.MainLoop()
	if err != nil && err != gocui.Quit {
		log.Panicln(err)
	}
}
Example #8
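// newMachineProvider builds a machineProvider around the named executor, panicking if that executor is not registered.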
func newMachineProvider(executor string) *machineProvider {
	provider := common.GetExecutor(executor)
	if provider == nil {
		logrus.Panicln("Missing", executor)
	}

	return &machineProvider{
		details:  make(machinesDetails),
		machine:  docker_helpers.NewMachineCommand(),
		provider: provider,
	}
}
Example #9
// loadConfiguration loads the application configuration from environment variables
func loadConfiguration(app *AppConfig, mgo *MgoConfig, nats *NatsConfig) {
	err := envconfig.Process(ServiceName, app)
	if err != nil {
		log.Panicln(err)
	}
	err = envconfig.Process("mongodb", mgo)
	if err != nil {
		log.Panicln(err)
	}
	err = envconfig.Process("nats", nats)
	if err != nil {
		log.Panicln(err)
	}
	if len(os.Getenv(KeyLogly)) > 0 {
		log.Printf("Loading logly token %s\n", os.Getenv(KeyLogly))
		hook := logrusly.NewLogglyHook(os.Getenv(KeyLogly),
			app.Host,
			log.InfoLevel,
			app.Name)
		log.AddHook(hook)
	}
}
Example #10
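// readFiles loads the cloud-config, local and private config files and merges them into c.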
func (c *CloudConfig) readFiles() error {
	data, err := readConfig(nil, CloudConfigFile, LocalConfigFile, PrivateConfigFile)
	if err != nil {
		log.Panicln(err)
		return err
	}

	if err := c.merge(data); err != nil {
		log.WithFields(log.Fields{"cfg": c, "data": data}).Panicln(err)
		return err
	}

	return nil
}
Example #11
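// askRunner prompts for the coordinator URL and either verifies an existing runner token or registers a new runner with the given details.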
func (s *RegisterCommand) askRunner() {
	s.URL = s.ask("url", "Please enter the gitlab-ci coordinator URL (e.g. https://gitlab.com/ci):")

	if s.Token != "" {
		log.Infoln("Token specified trying to verify runner...")
		log.Warningln("If you want to register use the '-r' instead of '-t'.")
		if !s.network.VerifyRunner(s.RunnerCredentials) {
			log.Panicln("Failed to verify this runner. Perhaps you are having network problems")
		}
	} else {
		// we store registration token as token, since we pass that to RunnerCredentials
		s.Token = s.ask("registration-token", "Please enter the gitlab-ci token for this runner:")
		s.Name = s.ask("name", "Please enter the gitlab-ci description for this runner:")
		s.TagList = s.ask("tag-list", "Please enter the gitlab-ci tags for this runner (comma separated):", true)

		result := s.network.RegisterRunner(s.RunnerCredentials, s.Name, s.TagList)
		if result == nil {
			log.Panicln("Failed to register this runner. Perhaps you are having network problems")
		}

		s.Token = result.Token
		s.registered = true
	}
}
Example #12
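// askExecutor prompts for an executor name until a valid one is given; in non-interactive mode an invalid executor causes a panic.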
func (s *RegisterCommand) askExecutor() {
	for {
		names := common.GetExecutors()
		executors := strings.Join(names, ", ")
		s.Executor = s.ask("executor", "Please enter the executor: "+executors+":", true)
		if common.NewExecutor(s.Executor) != nil {
			return
		} else {
			message := "Invalid executor specified"
			if s.NonInteractive {
				log.Panicln(message)
			} else {
				log.Errorln(message)
			}
		}
	}
}
Example #13
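// Execute runs the interactive registration flow: it loads the config, asks for runner and executor details, and saves the resulting configuration.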
func (s *RegisterCommand) Execute(context *cli.Context) {
	userModeWarning(true)

	s.context = context
	err := s.loadConfig()
	if err != nil {
		log.Panicln(err)
	}
	s.askRunner()

	if !s.LeaveRunner {
		defer func() {
			// De-register runner on panic
			if r := recover(); r != nil {
				if s.registered {
					s.network.DeleteRunner(s.RunnerCredentials)
				}

				// pass panic to next defer
				panic(r)
			}
		}()

		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)

		go func() {
			signal := <-signals
			s.network.DeleteRunner(s.RunnerCredentials)
			log.Fatalf("RECEIVED SIGNAL: %v", signal)
		}()
	}

	s.askExecutor()

	if s.config.Concurrent < s.Limit {
		log.Warningf("Specified limit (%d) larger than current concurrent limit (%d). Concurrent limit will not be enlarged.", s.Limit, s.config.Concurrent)
	}
	s.Machine = nil

	s.askExecutorOptions()
	s.addRunner(&s.RunnerConfig)
	s.saveConfig()

	log.Printf("Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!")
}
Example #14
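// importLoop reads newline-delimited input from stdin and loads it into the corpus concurrently, optionally purging the existing corpus first.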
func importLoop(newCorpus bool) {
	fi, err := os.Stdin.Stat()
	check(err, "importLoop is unable to get stdin: ")
	if fi.Mode()&os.ModeNamedPipe == 0 {
		log.Panicln("no input: please pipe some data in and try again")
	} else {
		config.Debug = false // improve load performance
		if newCorpus {
			log.Println("PURGE: removing old corpus")
			purgeCorpus()
		}
		log.Println("IMPORT: loading piped data into corpus at " + config.RedisServer)
		reader := bufio.NewReader(os.Stdin)

		var (
			wg  sync.WaitGroup
			sem = make(chan int, runtime.NumCPU()*1000)
		)
		i := 0
		for {
			sem <- 1
			line, err := reader.ReadString('\n')
			if err != nil {
				if err != io.EOF {
					panic(err)
				}
				break
			}
			i++
			wg.Add(1)
			go func(line string) {
				defer wg.Done()
				processInput(line, true)
				<-sem
			}(line)
		}
		wg.Wait()

		log.Println("IMPORT finished, processed " + fmt.Sprint(i) + " lines")
	}
}
Example #15
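// ask returns the trimmed value of the CLI flag key, prompting interactively until an acceptable (optionally empty) answer is given.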
func (s *RegisterCommand) ask(key, prompt string, allowEmptyOptional ...bool) string {
	allowEmpty := len(allowEmptyOptional) > 0 && allowEmptyOptional[0]

	result := s.context.String(key)
	result = strings.TrimSpace(result)

	if s.NonInteractive || prompt == "" {
		if result == "" && !allowEmpty {
			log.Panicln("The", key, "needs to be entered")
		}
		return result
	}

	for {
		if s.askOnce(prompt, &result, allowEmpty) {
			break
		}
	}

	return result
}
Example #16
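// readCmdline parses the kernel command line from /proc/cmdline and merges the resulting values into c.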
func (c *CloudConfig) readCmdline() error {
	log.Debug("Reading config cmdline")
	cmdLine, err := ioutil.ReadFile("/proc/cmdline")
	if err != nil {
		log.Panicln(err)
		return err
	}

	if len(cmdLine) == 0 {
		return nil
	}

	log.Debugf("Config cmdline %s", cmdLine)

	cmdLineObj := parseCmdline(strings.TrimSpace(string(cmdLine)))

	if err := c.merge(cmdLineObj); err != nil {
		log.WithFields(log.Fields{"cfg": c, "cmdLine": cmdLine, "data": cmdLineObj}).Panicln(err)
		return err
	}
	return nil
}
Example #17
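// ask returns the trimmed value of the CLI flag key, prompting on stdin until a non-empty answer is given unless empty values are allowed.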
func (s *RegisterCommand) ask(key, prompt string, allowEmptyOptional ...bool) string {
	allowEmpty := len(allowEmptyOptional) > 0 && allowEmptyOptional[0]

	result := s.context.String(key)
	result = strings.TrimSpace(result)

	if s.NonInteractive || prompt == "" {
		if result == "" && !allowEmpty {
			log.Panicln("The", key, "needs to be entered")
		}
		return result
	}

	for {
		println(prompt)
		if result != "" {
			print("["+result, "]: ")
		}

		if s.reader == nil {
			s.reader = bufio.NewReader(os.Stdin)
		}

		data, _, err := s.reader.ReadLine()
		if err != nil {
			panic(err)
		}
		newResult := string(data)
		newResult = strings.TrimSpace(newResult)

		if newResult != "" {
			return newResult
		}

		if allowEmpty || result != "" {
			return result
		}
	}
}
Example #18
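// CreateDictionaryIndex scans the dictionary file given by the -d argument and logs the hashes computed for each word.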
func CreateDictionaryIndex(arguments map[string]interface{}) {
	dictFilePath := arguments["-d"].(string)

	dictFile, err := os.Open(dictFilePath)

	if err != nil {
		log.Panicln("Error opening dictionary: ", err)
	}

	dictScanner := bufio.NewScanner(dictFile)

	for dictScanner.Scan() {
		word := dictScanner.Text()
		wordHashes := MakeHashes(word, arguments["-m"].([]string))

		log.WithFields(
			log.Fields{
				"word":   word,
				"hashes": wordHashes,
			}).Debugln("Word Hashes")
	}

}
Example #19
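// GetBlobPackFile looks up targetSHA1 in the bucket's pack indexes, loads the matching pack object, decrypts it and returns the decompressed data (or the raw decrypted bytes if it is not gzip-compressed).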
func (apsi *ArqPackSetIndex) GetBlobPackFile(abs *ArqBackupSet, ab *ArqBucket, targetSHA1 [20]byte) ([]byte, error) {
	indexes, err := apsi.ListBlobIndexes()
	if err != nil {
		log.Debugf("ArqPackSetIndex failed in GetBlobPackFile to ListBlobIndexes: %s", err)
		return nil, err
	}
	var packIndexObjectResult *PackIndexObject
	var indexResult string
	for _, index := range indexes {
		indexContents, err := ioutil.ReadFile(index)
		if err != nil {
			log.Panicln(fmt.Sprintf("Could not read index file %s into memory. err: %s", index, err))
		}
		p := bytes.NewBuffer(indexContents)
		var header PackIndex
		binary.Read(p, binary.BigEndian, &header)
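		// Fanout[b] holds the cumulative count of index entries whose first SHA1 byte is <= b: skip past the entries sorting before the target's first byte, then scan the ones that share it.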
		numberLessThanPrefix := int(header.Fanout[targetSHA1[0]-1])
		numberEqualAndLessThenPrefix := int(header.Fanout[targetSHA1[0]])
		var pio PackIndexObject
		p.Next(numberLessThanPrefix * int(unsafe.Sizeof(pio)))

		numberOfObjects := numberEqualAndLessThenPrefix - numberLessThanPrefix
		for i := 0; i < numberOfObjects; i++ {
			pio, _ := readIntoPackIndexObject(p)
			if testEq(pio.SHA1, targetSHA1) {
				packIndexObjectResult = pio
				indexResult = index
				break
			}
		}
	}
	if packIndexObjectResult == nil {
		err = fmt.Errorf("GetBlobPackFile failed to find targetSHA1 %s",
			hex.EncodeToString(targetSHA1[:]))
		log.Debugf("%s", err)
		return nil, err
	}
	packName, _ := splitExt(filepath.Base(indexResult))

	pfo, err := GetObjectFromBlobPackFile(abs, ab, packIndexObjectResult, packName)
	if err != nil {
		log.Debugf("GetBlobPackFile failed to GetObjectFromBlobPackFile: %s", err)
		return nil, err
	}
	decrypted, err := abs.BlobDecrypter.Decrypt(pfo.Data.Data)
	if err != nil {
		log.Debugf("GetBlobPackFile failed to decrypt: %s", err)
		return nil, err
	}
	// Try to decompress, if fails then assume it was uncompressed to begin with
	var b bytes.Buffer
	r, err := gzip.NewReader(bytes.NewBuffer(decrypted))
	if err != nil {
		log.Debugf("GetBlobPackFile decompression failed during NewReader, assuming not compressed: %s", err)
		return decrypted, nil
	}
	if _, err = io.Copy(&b, r); err != nil {
		log.Debugf("GetBlobPackFile decompression failed during io.Copy, assuming not compressed: %s", err)
		return decrypted, nil
	}
	if err := r.Close(); err != nil {
		log.Debugf("GetBlobPackFile decompression failed during reader Close, assuming not compressed: %s", err)
		return decrypted, nil
	}
	return b.Bytes(), nil
}
Example #20
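// Run loads the host configuration, applies the configured failure rates, and runs this host as a signing node or timestamper depending on app.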
func Run(hostname, cfg, app string, rounds int, rootwait int, debug, testConnect bool, failureRate, rFail, fFail int, logger, suite string) {
	if debug {
		coco.DEBUG = true
	}

	// fmt.Println("EXEC TIMESTAMPER: " + hostname)
	if hostname == "" {
		fmt.Println("hostname is empty")
		log.Fatal("no hostname given")
	}

	// load the configuration
	//log.Println("loading configuration")
	var hc *oldconfig.HostConfig
	var err error
	s := GetSuite(suite)
	opts := oldconfig.ConfigOptions{ConnType: "tcp", Host: hostname, Suite: s}
	if failureRate > 0 || fFail > 0 {
		opts.Faulty = true
	}
	hc, err = oldconfig.LoadConfig(cfg, opts)
	if err != nil {
		fmt.Println(err)
		log.Fatal(err)
	}

	// set FailureRates
	if failureRate > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailureRate = failureRate
		}
	}

	// set root failures
	if rFail > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailAsRootEvery = rFail
		}
	}
	// set follower failures
	// a follower fails on %ffail round with failureRate probability
	for i := range hc.SNodes {
		hc.SNodes[i].FailAsFollowerEvery = fFail
	}

	// run this specific host
	// log.Println("RUNNING HOST CONFIG")
	err = hc.Run(app != "sign", sign.MerkleTree, hostname)
	if err != nil {
		log.Fatal(err)
	}

	defer func(sn *sign.Node) {
		log.Panicln("program has terminated:", hostname)
		sn.Close()
	}(hc.SNodes[0])

	if app == "sign" {
		//log.Println("RUNNING Node")
		// if I am root do the announcement message
		if hc.SNodes[0].IsRoot(0) {
			time.Sleep(3 * time.Second)
			start := time.Now()
			iters := 10

			for i := 0; i < iters; i++ {
				start = time.Now()
				//fmt.Println("ANNOUNCING")
				hc.SNodes[0].LogTest = []byte("Hello World")
				err = hc.SNodes[0].Announce(0,
					&sign.AnnouncementMessage{
						LogTest: hc.SNodes[0].LogTest,
						Round:   i})
				if err != nil {
					log.Println(err)
				}
				elapsed := time.Since(start)
				log.WithFields(log.Fields{
					"file":  logutils.File(),
					"type":  "root_announce",
					"round": i,
					"time":  elapsed,
				}).Info("")
			}

		} else {
			// otherwise wait a little bit (hopefully it finishes by the end of this)
			time.Sleep(30 * time.Second)
		}
	} else if app == "stamp" || app == "vote" {
		// log.Println("RUNNING TIMESTAMPER")
		stampers, _, err := hc.RunTimestamper(0, hostname)
		// get rid of the hc information so it can be GC'ed
		hc = nil
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range stampers {
			// only listen if this is the hostname specified
			if s.Name() == hostname {
				s.Logger = logger
				s.Hostname = hostname
				s.App = app
				if s.IsRoot(0) {
					log.Println("RUNNING ROOT SERVER AT:", hostname, rounds)
					log.Printf("Waiting: %d s\n", rootwait)
					// wait for the other nodes to get set up
					time.Sleep(time.Duration(rootwait) * time.Second)

					log.Println("STARTING ROOT ROUND")
					s.Run("root", rounds)
					// log.Println("\n\nROOT DONE\n\n")

				} else if !testConnect {
					log.Println("RUNNING REGULAR AT:", hostname)
					s.Run("regular", rounds)
					// log.Println("\n\nREGULAR DONE\n\n")
				} else {
					// testing connection
					log.Println("RUNNING TEST_CONNECT AT:", hostname)
					s.Run("test_connect", rounds)
				}
			}
		}
	}
}
Example #21
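// Execute registers a runner: it loads the config, prompts for runner and executor details, configures the chosen executor and saves the configuration.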
func (c *RegisterCommand) Execute(context *cli.Context) {
	c.context = context
	err := c.loadConfig()
	if err != nil {
		log.Panicln(err)
	}
	c.askRunner()

	if !c.LeaveRunner {
		defer func() {
			if r := recover(); r != nil {
				if c.registered {
					c.network.DeleteRunner(c.RunnerCredentials)
				}

				// pass panic to next defer
				panic(r)
			}
		}()

		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)

		go func() {
			s := <-signals
			c.network.DeleteRunner(c.RunnerCredentials)
			log.Fatalf("RECEIVED SIGNAL: %v", s)
		}()
	}

	c.askExecutor()

	if limit := helpers.NonZeroOrDefault(c.Limit, 0); c.config.Concurrent < limit {
		log.Warningf("Specified limit (%d) larger than current concurrent limit (%d). Concurrent limit will not be enlarged.", limit, c.config.Concurrent)
	}

	switch c.Executor {
	case "docker":
		c.askDocker()
		c.SSH = nil
		c.Parallels = nil
	case "docker-ssh":
		c.askDocker()
		c.askSSHLogin()
		c.Parallels = nil
	case "ssh":
		c.askSSHServer()
		c.askSSHLogin()
		c.Docker = nil
		c.Parallels = nil
	case "parallels":
		c.askParallels()
		c.askSSHServer()
		c.Docker = nil
	}

	c.addRunner(&c.RunnerConfig)
	c.saveConfig()

	log.Printf("Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!")
}