func TestDeath(t *testing.T) {
	defer log.Flush()

	Convey("Validate death happens cleanly", t, func() {
		death := NewDeath(syscall.SIGTERM)
		syscall.Kill(os.Getpid(), syscall.SIGTERM)
		death.WaitForDeath()

	})

	Convey("Validate death happens with other signals", t, func() {
		death := NewDeath(syscall.SIGHUP)
		closeMe := &CloseMe{}
		syscall.Kill(os.Getpid(), syscall.SIGHUP)
		death.WaitForDeath(closeMe)
		So(closeMe.Closed, ShouldEqual, 1)
	})

	Convey("Validate death gives up after timeout", t, func() {
		death := NewDeath(syscall.SIGHUP)
		death.setTimeout(10 * time.Millisecond)
		neverClose := &neverClose{}
		syscall.Kill(os.Getpid(), syscall.SIGHUP)
		death.WaitForDeath(neverClose)

	})

}
Example #2
func New(endpoint []string, path string) *opt {
	cfg := client.Config{
		Endpoints:               endpoint,
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	}

	etcdClient, err := client.New(cfg)
	if err != nil {
		log.Errorf("new etcd client error: ", err)
		log.Flush()
		panic(0)
	}
	api := client.NewKeysAPI(etcdClient)
	resp, err := api.Get(context.Background(), "/swarm/docker/swarm/leader", nil)
	if err != nil {
		log.Errorf("get swarm leader error: %v", err)
		log.Flush()
		panic(0)
	}
	return &opt{
		Client:   etcdClient,
		Endpoint: endpoint,
		Path:     path,
		Api:      api,
		Leader:   fmt.Sprintf("http://%s", resp.Node.Value),
	}
}
func libWithSealogMain() {
	defer library.FlushLog()
	defer log.Flush()
	loadAppConfig()
	log.Info("App started")
	log.Info("Config loaded")

	// Disable library log
	log.Info("* Disabled library log test")
	library.DisableLog()
	calcF2()
	log.Info("* Disabled library log tested")

	// Use a special logger for library
	log.Info("* Special output test")
	specialOutputConfig()
	calcF2()
	log.Info("* Special output tested")

	// Use the same logger for both app and library
	log.Info("* Same output test")
	sameOutputConfig()
	calcF2()
	log.Info("* Same output tested")

	log.Info("App finished")
}
func (fr *TrecFileReader) read_to_chan(count int) (i int) {
	//Catch and log panics
	defer func() {
		if x := recover(); x != nil {
			log.Criticalf("Error in document %d of %s: %v", fr.docCounter, fr.filename, x)
			log.Flush()
		}
	}()

	for i := 0; i < count || count == -1; i++ {
		log.Debugf("Reading document %d from %s", i, fr.filename)
		doc, err := fr.read_next_doc()

		switch err {

		case io.EOF:
			log.Debugf("Got EOF for file %s", fr.filename)
			close(fr.documents)
			return i

		case nil:
			log.Debugf("Successfully read document %s", doc.Identifier())
			fr.documents <- doc

		default:
			log.Criticalf("Oh f**k...%v", err)
			panic(err)

		}
	}
	log.Infof("Returning")
	return i
}
// Saves the credential cache to disk. This writes to a temporary file first, then moves the file to the config location.
// This eliminates the possibility of reading partially written credential files, and reduces (but does not eliminate) concurrent
// file access. There is no guarantee here for handling multiple writes at once since there is no out-of-process locking.
func (f *fileCredentialCache) save(registryCache *RegistryCache) error {
	defer log.Flush()
	file, err := ioutil.TempFile(f.path, ".config.json.tmp")
	if err != nil {
		return err
	}

	buff, err := json.MarshalIndent(registryCache, "", "  ")
	if err != nil {
		file.Close()
		os.Remove(file.Name())
		return err
	}

	_, err = file.Write(buff)

	if err != nil {
		file.Close()
		os.Remove(file.Name())
		return err
	}

	file.Close()
	// note this is only atomic when relying on linux syscalls
	return os.Rename(file.Name(), f.fullFilePath())
}
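As the doc comment above notes, writing to a temporary file and then renaming it into place means readers never observe a half-written file, and the rename is only atomic on POSIX filesystems. A minimal, generic sketch of the same pattern is below; writeFileAtomic and its parameters are illustrative helpers, not part of this codebase, and it assumes the io/ioutil, os, and path/filepath imports.
func writeFileAtomic(dir, name string, data []byte) error {
	// Create the temp file in the destination directory so the final
	// rename stays on the same filesystem.
	tmp, err := ioutil.TempFile(dir, ".tmp-")
	if err != nil {
		return err
	}
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		os.Remove(tmp.Name())
		return err
	}
	if err := tmp.Close(); err != nil {
		os.Remove(tmp.Name())
		return err
	}
	// Replace the destination with the temp file (atomic on POSIX).
	return os.Rename(tmp.Name(), filepath.Join(dir, name))
}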
Example #6
func TestGetPkgPath(t *testing.T) {
	defer log.Flush()

	Convey("Give pkgPath a ptr", t, func() {
		c := &Closer{}
		name, pkgPath := GetPkgPath(c)
		So(name, ShouldEqual, "Closer")
		So(pkgPath, ShouldEqual, "github.com/vrecan/death")

	})

	Convey("Give pkgPath a interface", t, func() {
		var closable Closable
		closable = Closer{}
		name, pkgPath := GetPkgPath(closable)
		So(name, ShouldEqual, "Closer")
		So(pkgPath, ShouldEqual, "github.com/vrecan/death")
	})

	Convey("Give pkgPath a copy", t, func() {
		c := Closer{}
		name, pkgPath := GetPkgPath(c)
		So(name, ShouldEqual, "Closer")
		So(pkgPath, ShouldEqual, "github.com/vrecan/death")
	})
}
Example #7
func main() {
	defer log.Flush()
	stdFormat()
	flag.Parse()

	if *clientId == "" {
		fmt.Fprintln(os.Stderr, "--client-id is not specified. See https://developers.google.com/drive/quickstart-go for step-by-step guide.")
		return
	}

	if *clientSecret == "" {
		fmt.Fprintln(os.Stderr, "--client-secret is not specified. See https://developers.google.com/drive/quickstart-go for step-by-step guide.")
		return
	}

	fs := gdrive.NewGDriveFileSystem(*clientId, *clientSecret)

	http.HandleFunc("/debug/gc", gcHandler)
	http.HandleFunc("/favicon.ico", notFoundHandler)
	http.HandleFunc("/", webdav.NewHandler(fs))

	fmt.Printf("Listening on %v\n", *addr)

	err := http.ListenAndServe(*addr, nil)
	if err != nil {
		log.Errorf("Error starting WebDAV server: %v", err)
	}
}
Example #8
func main() {
	defer log.Flush()
	flag.Parse()
	args := flag.Args()

	if len(args) == 0 {
		usage(actions(nil))
		os.Exit(1)
	}

	logger, err := log.LoggerFromConfigAsString(config.Logger())
	if err != nil {
		die(err)
	}
	log.ReplaceLogger(logger)

	init, err := engine.New()
	if err != nil {
		die(err)
	}
	log.Info(args[0])
	actions := actions(init)
	action, ok := actions[args[0]]
	if !ok {
		usage(actions)
		os.Exit(1)
	}
	err = action.function()
	if err != nil {
		die(err)
	}
}
Example #9
func TestSTreeMod(t *testing.T) {

	defer log.Flush()

	Convey("Test clone\n", t, func() {

		s, err := NewSTreeJson(strings.NewReader(`{"key1": "val1", "key.2": 1234, "key3": {"key4": true, "key5": -12.34}}`))
		So(err, ShouldBeNil)

		c, err := s.clone()
		So(err, ShouldBeNil)
		s["key1"] = "valMod"

		s3, err := s.STreeVal(".key3")
		So(err, ShouldBeNil)
		s3["key4"] = false

		log.Debugf("Test clone - s: %v", s)
		log.Debugf("Test clone - c: %v", c)

		v1, err := c.StrVal(".key1")
		So(err, ShouldBeNil)
		So(v1, ShouldEqual, "val1")

		v2, err := c.BoolVal(".key3.key4")
		So(err, ShouldBeNil)
		So(v2, ShouldBeTrue)
	})
}
Example #10
func main() {
	// Set up a done channel, that's shared by the whole pipeline.
	// Closing this channel will kill all pipeline goroutines
	//done := make(chan struct{})
	//defer close(done)

	// Set up logging
	initializeLogging()

	// Flush the log before we shutdown
	defer log.Flush()

	// Parse the command line flags
	config := parseCommandLineFlags()
	gamq.SetConfig(&config)

	if config.ProfilingEnabled {
		defer profile.Start(profile.CPUProfile).Stop()
	}

	log.Infof("Broker started on port: %d", gamq.Configuration.Port)
	log.Infof("Executing on: %d threads", runtime.GOMAXPROCS(-1))

	connectionManager := gamq.NewConnectionManager()
	connectionManager.Start()
}
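The commented-out done channel above refers to the usual Go pipeline-cancellation pattern: every stage selects on a shared done channel, and closing that channel stops them all. A minimal sketch under that assumption follows; runPipeline and its stages are illustrative, not gamq code.
func runPipeline() {
	done := make(chan struct{})
	defer close(done) // closing done tells every stage to stop

	values := make(chan int)
	go func() {
		defer close(values)
		for i := 0; ; i++ {
			select {
			case values <- i:
			case <-done:
				return // consumer went away; exit instead of leaking
			}
		}
	}()

	for v := range values {
		if v >= 3 {
			return // deferred close(done) unblocks the producer
		}
	}
}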
Example #11
func Chew(chewChan <-chan *messaging.Food, swallowChan chan *messaging.Food, wg *sync.WaitGroup) {
	log.Info("Let the chewing begin!")
	defer close(swallowChan)
	r := rep.NewReporter()
	r.RegisterStatWIndex("chew", "good")
	for msg := range chewChan {
		if nil != msg {
			//parsing work here probably change what our message type looks like when swallowed

			date := time.Unix(0, msg.GetTimeNano()).UTC()
			fmtDate := date.Format("2006-01-02")
			indexType := "all"
			customerId := "id" //should exist eventually
			index := "documents-" + customerId + "-" + fmtDate
			msg.Index = &index
			msg.IndexType = &indexType
			r.AddStatWIndex("chew", 1, "good")
			swallowChan <- msg
		}
	}
	log.Info("Done chewing")
	log.Flush()
	wg.Done()

}
Example #12
func main() {

	if len(os.Args) < 3 {
		fmt.Fprintf(os.Stderr, "too few args,args form: <host port>\n")
		os.Exit(1)
	}
	host := os.Args[1]
	port, err := strconv.Atoi(os.Args[2])
	if err != nil {
		fmt.Fprintf(os.Stderr, "invalid port,need integer type,your input port: <port>\n", os.Args[2])
		os.Exit(1)
	}
	ctx := context.GetContext()
	currentServer := make(map[string]interface{})
	currentServer["id"] = "connector-1"
	currentServer["serverType"] = "connector"
	currentServer["host"] = "127.0.0.1"
	currentServer["port"] = 8888
	ctx.CurrentServer = currentServer
	defer seelog.Flush()

	tcp_cnct := tcp_connector.NewTcpConnector(host, port, nil)

	tcp_cnct.RegistNewConnCB(NewConnCB)
	tcp_cnct.RegistNewMsgCB(NewMsgCB)
	tcp_cnct.Start()
	ch := make(chan int)
	<-ch
}
func (tz *BadXMLTokenizer) Tokens() <-chan *Token {

	token_channel := make(chan *Token)
	log.Debugf("Created channel %v as part of Tokens(), with"+
		" Scanner = %v", token_channel, tz)

	go func(ret chan *Token, tz *BadXMLTokenizer) {
		for {
			log.Tracef("Scanner calling Next()")
			tok, err := tz.Next()
			log.Tracef("scanner.Next() returned %s, %v", tok, err)
			switch err {
			case nil:
				log.Debugf("Pushing %s into token channel %v",
					tok, ret)
				ret <- tok
			case io.EOF:
				log.Debugf("received EOF, closing channel")
				close(ret)
				log.Debugf("Closed.")
				log.Flush()
				return
				panic("I should have exited the goroutine but " +
					"didn't")
			}
		}
	}(token_channel, tz)

	return token_channel
}
func adaptiveMain() {
	defer log.Flush()
	loadAdaptiveConfig()
	testMsgIntensity(1)
	testMsgIntensity(5)
	testMsgIntensity(10)
}
Example #15
func exitOnError(e error) {
	if e != nil {
		log.Errorf("Received error '%s'", e.Error())
		log.Flush()
		os.Exit(1)
	}
}
Example #16
func main() {
	defer log.Flush()

	clientId := os.Getenv("MONDO_CLIENT_ID")
	clientSecret := os.Getenv("MONDO_CLIENT_SECRET")
	userName := os.Getenv("MONDO_USERNAME")
	password := os.Getenv("MONDO_PASSWORD")

	// Authenticate with Mondo, and return an authenticated MondoClient.
	client, err := mondo.Authenticate(clientId, clientSecret, userName, password)
	if err != nil {
		panic(err)
	}

	// Retrieve all of the accounts.
	acs, err := client.Accounts()
	if err != nil {
		panic(err)
	}

	// Grab our account ID.
	accountId := acs[0].ID

	if _, err := client.RegisterWebhook(accountId, "YOUR_URL_HERE"); err != nil {
		log.Errorf("Error registering webhook: %v", err)
	}
}
Example #17
func main() {
	// Make sure we flush the log before quitting:
	defer log.Flush()

	var hostInventoryMutex sync.Mutex
	var hostInventory types.HostInventory

	// Configuration object for the HostInventoryUpdater:
	config := types.Config{
		HostUpdateFrequency: *hostUpdateFreq,
		DNSUpdateFrequency:  *dnsUpdateFreq,
		RoleTag:             *roleTag,
		EnvironmentTag:      *environmentTag,
		DNSDomainName:       *dnsDomainName,
		AWSRegion:           *awsRegion,
		DNSTTL:              *dnsTTL,
		HostInventory:       hostInventory,
		HostInventoryMutex:  hostInventoryMutex,
	}

	// Run the host-inventory-updater:
	go hostinventory.Updater(&config)

	// Run the dns-updater:
	go dns.Updater(&config)

	// Run until we get a kill-signal:
	runUntilKillSignal()
}
Example #18
func typesMain() {
	defer log.Flush()
	syncLogger()
	fmt.Println()
	asyncLoopLogger()
	fmt.Println()
	asyncTimerLogger()
}
Example #19
func main() {
	defer log.Flush()
	logger, err := log.LoggerFromConfigAsFile("seelog.xml")

	if nil != err {
		log.Warn("Failed to load config", err)
	}
	log.ReplaceLogger(logger)
	flag.Parse()
	statsTransformChannel := make(chan *diskStat.DiskStat, 10)
	statsOutputChannel := make(chan *diskStat.ExtendedIoStats, 10)

	var output outputInterface.Output
	proto := PStdOut

	switch *protocolType {
	case "protobuffers":
		{
			proto = PProtoBuffers
		}
	case "json":
		{
			proto = PJson
		}
	default:
		{
			if *outputType == "zmq" {
				proto = PProtoBuffers
			} else if *outputType == "stdout" {
				proto = PStdOut
			}
		}
	}

	switch *outputType {
	case "zmq":
		output, err = zmqOutput.NewZmqOutput(queueUrl, proto)
	case "nano":
		output, err = nanoMsgOutput.NewNanoMsgOutput(queueUrl, proto)
	default:
		output = &logOutput.LogOutput{proto}
	}
	if nil != err {
		log.Error("Failed to setup output ", err)
	}

	go ioStatTransform.TransformStat(statsTransformChannel, statsOutputChannel)

	go statsOutput.Output(statsOutputChannel, output)

	for {
		readAndSendStats(statsTransformChannel)
		time.Sleep(time.Second * time.Duration(*interval))

	}
	close(statsTransformChannel)
	close(statsOutputChannel)
}
func exceptionsMain() {
	defer log.Flush()
	testMinMax()
	testMin()
	testMax()
	testList()
	testFuncException()
	testFileException()
}
Example #21
func log_init() {
	defer log.Flush()
	logger, e := log.LoggerFromConfigAsFile(GetConfigDir() + "/log.conf")
	if e != nil {
		log.Criticalf("Error %v", e)
	}
	log.ReplaceLogger(logger)
	//TODO timer watch config file
}
Example #22
func TestDeath(t *testing.T) {
	defer log.Flush()

	Convey("Validate death handles unhashable types", t, func() {
		u := make(Unhashable)
		death := NewDeath(syscall.SIGTERM)
		syscall.Kill(os.Getpid(), syscall.SIGTERM)
		death.WaitForDeath(u)
	})

	Convey("Validate death happens cleanly", t, func() {
		death := NewDeath(syscall.SIGTERM)
		syscall.Kill(os.Getpid(), syscall.SIGTERM)
		death.WaitForDeath()

	})

	Convey("Validate death happens with other signals", t, func() {
		death := NewDeath(syscall.SIGHUP)
		closeMe := &CloseMe{}
		syscall.Kill(os.Getpid(), syscall.SIGHUP)
		death.WaitForDeath(closeMe)
		So(closeMe.Closed, ShouldEqual, 1)
	})

	Convey("Validate death gives up after timeout", t, func() {
		death := NewDeath(syscall.SIGHUP)
		death.SetTimeout(10 * time.Millisecond)
		neverClose := &neverClose{}
		syscall.Kill(os.Getpid(), syscall.SIGHUP)
		death.WaitForDeath(neverClose)

	})

	Convey("Validate death uses new logger", t, func() {
		death := NewDeath(syscall.SIGHUP)
		closeMe := &CloseMe{}
		logger := &MockLogger{}
		death.SetLogger(logger)

		syscall.Kill(os.Getpid(), syscall.SIGHUP)
		death.WaitForDeath(closeMe)
		So(closeMe.Closed, ShouldEqual, 1)
		So(logger.Logs, ShouldNotBeEmpty)
	})

	Convey("Close multiple things with one that fails the timer", t, func() {
		death := NewDeath(syscall.SIGHUP)
		death.SetTimeout(10 * time.Millisecond)
		neverClose := &neverClose{}
		closeMe := &CloseMe{}
		syscall.Kill(os.Getpid(), syscall.SIGHUP)
		death.WaitForDeath(neverClose, closeMe)
		So(closeMe.Closed, ShouldEqual, 1)
	})

}
Example #23
func main() {
	defer log.Flush()
	app = cli.NewApp()
	app.Name = "Orchestra"
	app.Usage = "Orchestrate Go Services"
	app.Author = "Vincenzo Prignano"
	app.Email = ""
	app.EnableBashCompletion = true
	app.Commands = []cli.Command{
		*commands.ExportCommand,
		*commands.StartCommand,
		*commands.StopCommand,
		*commands.LogsCommand,
		*commands.RestartCommand,
		*commands.PsCommand,
		*commands.TestCommand,
		*commands.BuildCommand,
		*commands.UpdateCommand,
	}
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:   "config, c",
			Value:  "orchestra.yml",
			Usage:  "Specify a different config file to use",
			EnvVar: "ORCHESTRA_CONFIG",
		},
	}
	// init checks for an existing orchestra.yml in the current working directory
	// and creates a new .orchestra directory (if doesn't exist)
	app.Before = func(c *cli.Context) error {
		confVal := c.GlobalString("config")
		if confVal == "" {
			confVal = defaultConfigFile
		}

		config.ConfigPath, _ = filepath.Abs(confVal)
		if _, err := os.Stat(config.ConfigPath); os.IsNotExist(err) {
			fmt.Printf("No %s found. Have you specified the right directory?\n", c.GlobalString("config"))
			os.Exit(1)
		}
		services.ProjectPath, _ = path.Split(config.ConfigPath)
		services.OrchestraServicePath = services.ProjectPath + ".orchestra"

		if err := os.Mkdir(services.OrchestraServicePath, 0766); err != nil && os.IsNotExist(err) {
			fmt.Println(err.Error())
			os.Exit(1)
		}
		config.ParseGlobalConfig()
		services.Init()
		return nil
	}
	app.Version = "0.1"
	app.Run(os.Args)
	if commands.HasErrors() {
		os.Exit(1)
	}
}
func main() {
	configFile := flag.String("cfg", "", "The config file")
	testSummary := flag.Bool("testSummary", false, "Test summary mode")
	flag.Parse()
	file, err := os.Open(*configFile)
	errHndlr(err, FATAL)
	decoder := json.NewDecoder(file)
	config := &StartupConfig{}
	err = decoder.Decode(&config)
	errHndlr(err, FATAL)

	if config.RedisInterval == 0 {
		config.RedisInterval = defaultRedisInterval
	}

	logger, err := log.LoggerFromConfigAsFile(config.SeelogConfig)
	defer log.Flush()

	if err != nil {
		panic("error reading " + config.SeelogConfig)
	}

	fmt.Println("Replacing logger, see log file according to " + config.SeelogConfig)
	if *testSummary {
		fmt.Println("WARNING: testSummary is on!")
	}
	log.ReplaceLogger(logger)

	runtime.GOMAXPROCS(runtime.NumCPU())

	runningConfig, err := getTmData(config, true)
	errHndlr(err, FATAL)
	go houseKeeping(runningConfig, *testSummary)

	freeList := NewPool(16, config.RedisString)
	<-time.NewTimer(time.Now().Truncate(time.Duration(config.RedisInterval) * time.Second).Add(time.Duration(config.RedisInterval) * time.Second).Sub(time.Now())).C
	tickerChan := time.Tick(time.Duration(config.RedisInterval) * time.Second)
	for now := range tickerChan {
		if now.Second() == 30 {
			trc, err := getTmData(config, false)

			if err == nil {
				runningConfig = trc
			}
		}
		for cdnName, urls := range runningConfig.HealthUrls {
			for _, url := range urls {
				// log.Info(cdnName, "   ", statName, " -> ", url)
				if *testSummary {
					fmt.Println("Skipping stat write - testSummary mode is ON!")
					continue
				}
				go rascalToRedis(cdnName, url, runningConfig.CacheGroupMap, freeList, config)
			}
		}
	}
}
Example #25
func checkConfigAndDie() {
	if len(config.AsteriskAddr) == 0 {
		log.Critical("Asterisk address is null or empty. Please check the configuraiton file.\n")
		log.Flush()
		os.Exit(1)
	}

	if len(config.AsteriskUser) == 0 || len(config.AsteriskPassword) == 0 {
		log.Critical("Asterisk credentials missing.  Please check the configuration file.\n")
		log.Flush()
		os.Exit(1)
	}

	if config.TestCallActive && config.TestCallSchedule < 30 {
		log.Criticalf("Asterisk tesing interval is too short : %d. Minimal value is 30(seconds).Please check the configuration file.\n", config.TestCallSchedule)
		log.Flush()
		os.Exit(1)
	}
}
Example #26
func main() {
	logger, err := log.LoggerFromConfigAsString("<seelog type=\"asynctimer\" asyncinterval=\"500000000\"/>")
	checkFail(err)
	log.ReplaceLogger(logger)
	defer log.Flush()
	println("start")

	log.Info("Hello from Seelog!")

	time.Sleep(time.Second * 10)
}
Example #27
func formatsMain() {
	defer log.Flush()
	defaultFormat()
	stdFormat()
	dateTimeFormat()
	dateTimeCustomFormat()
	logLevelTypesFormat()
	fileTypesFormat()
	funcFormat()
	xmlFormat()
}
Example #28
func main() {
	defer log.Flush()

	err := godotenv.Load()
	if err != nil {
		panic(err)
	}

	// bucket := awsutil.Bucket()
	// log.Warn(bucket.Name)
	// data := []byte("Hello, Goamz!!")
	// if err := bucket.Put("sample.txt", data, "text/plain", s3.BucketOwnerFull, s3.Options{}); err != nil {
	// 	panic(err)
	// }
	// if 1 == 1 {
	// 	return
	// }

	for {
		job, err := jobs.TakeJob(60 * time.Second)
		if err != nil {
			if connErr, ok := err.(beanstalk.ConnError); ok && connErr.Err == beanstalk.ErrTimeout {
				time.Sleep(1 * time.Second)
				continue
			}
			// Crash driven design. If we got an error taking a job and it
			// wasn't because we timed out waiting for one, we crash out and
			// hope a fresh instance has better luck. Of course if beanstalk
			// is down we're gonna keep crashing, but our supervisor should be
			// doing exponential backoff on process restarts anyway.
			panic(err)
		}

		payload := job.Payload()
		jobName := payload["Job"]
		log.Infof("Processing job %s", jobName)
		log.Debug("Job data ", payload)
		switch jobName {
		case "build-docset":
			// TODO: some kind of lock here in case we queue multiple jobs to
			// build the same docset.
			err = buildDocset(payload["GroupId"], payload["ArtifactId"], payload["Version"])
		}

		if err != nil {
			log.Warnf("Job %s failed", jobName, err)
			job.Release(1 * time.Minute)
		} else {
			log.Infof("Job %s was successful.", jobName)
			job.Complete()
		}
	}
}
Example #29
func main() {
	defer log.Flush()

	clientId := os.Getenv("MONDO_CLIENT_ID")
	clientSecret := os.Getenv("MONDO_CLIENT_SECRET")
	userName := os.Getenv("MONDO_USERNAME")
	password := os.Getenv("MONDO_PASSWORD")

	// Authenticate with Mondo, and return an authenticated MondoClient.
	client, err := mondo.Authenticate(clientId, clientSecret, userName, password)
	if err != nil {
		panic(err)
	}

	log.Infof("Authenticated with Mondo successfully!")

	// Retrieve all of the accounts.
	acs, err := client.Accounts()
	if err != nil {
		panic(err)
	}

	if len(acs) == 0 {
		log.Errorf("No accounts with Mondo found :( Sign up!")
		return
	}

	// Grab our account ID.
	accountId := acs[0].ID

	// Get all transactions. You can also get a specific transaction by ID.
	transactions, err := client.Transactions(accountId, "", "", 100)
	if err != nil {
		panic(err)
	}

	if len(transactions) == 0 {
		log.Warnf("No transactions found. Sorry!")
		return
	}

	// Render a lovely table of all of your transactions.
	table := transactionsToTable(transactions...)
	table.Render()

	// Create a feed item in your feed.
	// Don't run this, unless you want to spam yourself, as there is no way currently to delete a feed item.
	// err = client.CreateFeedItem(id, "Hi there!", "https://blog.golang.org/gopher/gopher.png", "",
	// t "", "", "This is a test item, from Go")
	// if err != nil {
	// 	panic(err)
	// }
}
Example #30
func main() {
	defer seelog.Flush()

	seelog.LoggerFromConfigAsString("formatid=\"debug\"")

	flag.Parse()

	cfg := FtpCfg{*host, *user, *pw, *port}
	fClient, err := NewFtpClient(cfg)
	if err != nil {
		panic(err)
	}
	iClient, err := NewInfluxClient(*surl, *db)
	if err != nil {
		panic(err)
	}

	files := make([]*FtpToInflux, 0)

	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		line := scanner.Text()
		seelog.Tracef("Handle line '%s'", line)
		if strings.HasPrefix(line, commentPrefix) {
			//Comment
			continue
		}
		splittedLine := strings.Split(line, space)
		if len(splittedLine) != 2 {
			seelog.Warnf("Line '%s' has not exactly one space", line)
			continue
		}
		data := &FtpToInflux{splittedLine[0], strings.Split(splittedLine[1], sep)}
		files = append(files, data)
	}

	for _, f := range files {
		seelog.Tracef("Start with file '%s'!", f.Filename)
		buf, err := fClient.Download(f.Filename)
		if err != nil {
			seelog.Warnf("Error downloading file '%s': %v", f.Filename, err)
			continue
		}
		datas := Transform(buf)
		err = iClient.Write(datas, f.Measurements)
		if err != nil {
			seelog.Warnf("Error writing Data: %v", err)
			continue
		}
		seelog.Tracef("File '%s' downloaded and written to %d measurements!", f.Filename, len(f.Measurements))
	}
}