Example #1
func main() {
	var conf Conf
	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err)
	}

	stripe.Key = conf.StripeKey
	//stripe.LogLevel = 1 // errors only

	producer, err := sarama.NewSyncProducer(strings.Split(conf.SeedBroker, ","), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err := producer.Close(); err != nil {
			log.Fatal(err)
		}
	}()

	log.Printf("Tailing the log")
	err = tailLog(producer, conf.KafkaTopic)
	if err != nil {
		log.Fatal(err)
	}
}
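
The Conf type read by this example isn't part of the snippet. Below is a minimal sketch of what it might look like, inferred from the fields the code uses; the exact env tag names are assumptions.

// Hypothetical definition of Conf; the real env tag names may differ.
type Conf struct {
	KafkaTopic string `env:"KAFKA_TOPIC,required"`
	SeedBroker string `env:"SEED_BROKER,required"`
	StripeKey  string `env:"STRIPE_KEY,required"`
}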
Example #2
func main() {
	var api struct {
		Key string `env:"GOOGLE_PLACES_API_KEY,required"`
	}
	if err := envdecode.Decode(&api); err != nil {
		log.Fatalln(err)
	}
	meander.APIKey = api.Key
	http.HandleFunc("/journeys", cors(func(w http.ResponseWriter, r *http.Request) {
		respond(w, r, meander.Journeys)
	}))
	http.HandleFunc("/recommendations", cors(func(w http.ResponseWriter, r *http.Request) {
		q := &meander.Query{
			Journey: strings.Split(r.URL.Query().Get("journey"), "|"),
		}
		q.Lat, _ = strconv.ParseFloat(r.URL.Query().Get("lat"), 64)
		q.Lng, _ = strconv.ParseFloat(r.URL.Query().Get("lng"), 64)
		q.Radius, _ = strconv.Atoi(r.URL.Query().Get("radius"))
		q.CostRangeStr = r.URL.Query().Get("cost")
		places := q.Run()
		respond(w, r, places)
	}))
	log.Fatal(http.ListenAndServe(":8080", http.DefaultServeMux))

}
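
The cors and respond helpers used above aren't part of this snippet. A minimal sketch of what the cors wrapper might look like, assuming it only needs to set the allow-origin header (net/http is the only import required):

// Hypothetical sketch of the cors decorator; the real helper may also handle
// preflight requests and additional headers.
func cors(f http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		f(w, r)
	}
}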
Example #3
// New creates a new SparkClient:
//   s := sparkClient.New()
//   s.Rooms()
// It requires that the environment variable SPARK_AUTH_TOKEN be defined.
func New() *SparkClient {
	var conn net.Conn
	var r io.ReadCloser

	var conf struct {
		AuthToken string `env:"SPARK_AUTH_TOKEN"`
	}

	if err := envdecode.Decode(&conf); err != nil {
		log.Fatalln(err)
	}
	client := &http.Client{
		Transport: &http.Transport{
			Dial: func(netw, addr string) (net.Conn, error) {
				if conn != nil {
					conn.Close()
					conn = nil
				}
				netc, err := net.DialTimeout(netw, addr, 5*time.Second)
				if err != nil {
					return nil, err
				}
				conn = netc
				return netc, nil
			},
		},
	}

	return &SparkClient{
		authtoken:  conf.AuthToken,
		conn:       conn,
		httpClient: client,
		reader:     r,
	}
}
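
The SparkClient type isn't shown in this example. A rough sketch of its shape, inferred from the composite literal above; field names and types are taken from that usage, everything else is an assumption.

// Hypothetical shape of SparkClient based on the fields set in New.
type SparkClient struct {
	authtoken  string
	conn       net.Conn
	httpClient *http.Client
	reader     io.ReadCloser
}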
Example #4
// setupTwitterAuth performs the Twitter authentication setup.
func setupTwitterAuth() {

	var ts struct {
		ConsumerKey    string `env:"SP_TWITTER_KEY,required"`
		ConsumerSecret string `env:"SP_TWITTER_SECRET,required"`
		AccessToken    string `env:"SP_TWITTER_ACCESSTOKEN,required"`
		AccessSecret   string `env:"SP_TWITTER_ACCESSSECRET,required"`
	}

	// envdecode reads and parses the environment variables.
	if err := envdecode.Decode(&ts); err != nil {
		log.Fatalln(err)
	}

	creds = &oauth.Credentials{
		Token:  ts.AccessToken,
		Secret: ts.AccessSecret,
	}

	authClient = &oauth.Client{
		Credentials: oauth.Credentials{
			Token:  ts.ConsumerKey,
			Secret: ts.ConsumerSecret,
		},
	}
}
Example #5
func main() {
	var conf Conf
	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err)
	}

	db, err := sql.Open("postgres", conf.DatabaseURL)
	if err != nil {
		log.Fatal(err)
	}

	doneChan := make(chan int)
	pageChan := make(chan Page, PageBuffer)
	start := time.Now()

	// Request events from the API.
	go func() {
		err := requestEvents(conf.StripeKey, conf.StripeURL, doneChan, pageChan)
		if err != nil {
			log.Fatal(err)
		}
	}()

	// And simultaneously, load them to Postgres.
	numProcessed, err := loadEvents(doneChan, pageChan, db)
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("Reached end of the log. Processed %v event(s) in %v.",
		numProcessed, time.Since(start))
}
Example #6
func main() {
	var conf Conf
	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err)
	}

	stripe.Key = conf.StripeKey
	//stripe.LogLevel = 1 // errors only

	dbSQL, err := sql.Open("postgres", conf.DatabaseURL)
	if err != nil {
		log.Fatal(err)
	}

	db, err := gorm.Open("postgres", dbSQL)
	if err != nil {
		log.Fatal(err)
	}

	//
	// Phase 0: Sample the log
	//
	// Capture a sample of the most recent event in our log. This will allow us
	// to start consuming the feed from after where our snapshot left off.
	//

	log.Printf("STEP: Sampling the log")
	eventSample, err := getEventSample()
	if err != nil {
		log.Fatal(err)
	}

	//
	// Phase 1: Load a snapshot
	//
	// Load a base snapshot of data as it stands in the API. This may be stale
	// by the time we iterate to its end but that's okay because we'll get
	// updates by using the log.
	//

	log.Printf("STEP: Loading from snapshot")
	err = loadSnapshot(db)
	if err != nil {
		log.Fatal(err)
	}

	//
	// Phase 2: Tail the log
	//
	// Consume the log from the position of the sample that we took before
	// loading the snapshot. This allows us to get all updates that occurred
	// during the snapshot loading process.
	//

	log.Printf("STEP: Tailing the log from %v", eventSample.ID)
	err = tailLog(db, eventSample)
	if err != nil {
		log.Fatal(err)
	}
}
Example #7
func Load() (*Config, error) {
	conf := &Config{}
	err := envdecode.Decode(conf)
	if err != nil {
		return nil, err
	}

	conf.JQVer = jq.Version

	return conf, nil
}
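
A usage sketch for Load: the surrounding main function is hypothetical, but it shows the intended pattern of decoding configuration once at startup and failing fast on error.

// Hypothetical caller of Load.
func main() {
	conf, err := Load()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("configuration loaded (jq version %v)", conf.JQVer)
}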
Example #8
func New() *TwitStream {
	var ts struct {
		ConsumerKey    string `env:"STRIPSTOCK_TWITTER_CONSUMER_KEY,required"`
		ConsumerSecret string `env:"STRIPSTOCK_TWITTER_CONSUMER_SECRET,required"`
		AccessToken    string `env:"STRIPSTOCK_TWITTER_ACCESS_TOKEN,required"`
		AccessSecret   string `env:"STRIPSTOCK_TWITTER_ACCESS_SECRET,required"`
	}
	var conn net.Conn

	if err := envdecode.Decode(&ts); err != nil {
		log.Fatalln(err)
	}
	client := &http.Client{
		Transport: &http.Transport{
			Dial: func(netw, addr string) (net.Conn, error) {
				if conn != nil {
					conn.Close()
					conn = nil
				}
				netc, err := net.DialTimeout(netw, addr, 5*time.Second)
				if err != nil {
					return nil, err
				}
				conn = netc
				return netc, nil
			},
		},
	}

	var r io.ReadCloser

	return &TwitStream{
		authClient: &oauth.Client{
			Credentials: oauth.Credentials{
				Token:  ts.ConsumerKey,
				Secret: ts.ConsumerSecret,
			},
		},
		creds: &oauth.Credentials{
			Token:  ts.AccessToken,
			Secret: ts.AccessSecret,
		},
		conn:       conn,
		httpClient: client,
		reader:     r,
	}
}
Example #9
func main() {
	singularity.InitLog(false)

	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err)
	}

	err = singularity.CreateOutputDirs(singularity.TargetDir)
	if err != nil {
		log.Fatal(err)
	}

	err = serve(conf.Port)
	if err != nil {
		log.Fatal(err)
	}
}
Example #10
func dbSession() dropbox.Session {
	var s struct {
		AppKey      string `env:"DROPBOX_APP_KEY,required"`
		AppSecret   string `env:"DROPBOX_APP_SECRET,required"`
		TokenKey    string `env:"DROPBOX_TOKEN_KEY,required"`
		TokenSecret string `env:"DROPBOX_TOKEN_SECRET,required"`
	}
	if err := envdecode.Decode(&s); err != nil {
		log.Fatalln(err)
	}

	return dropbox.Session{
		AppKey:     s.AppKey,
		AppSecret:  s.AppSecret,
		AccessType: "full_dropbox",
		Token: dropbox.AccessToken{
			Key:    s.TokenKey,
			Secret: s.TokenSecret,
		},
	}
}
Example #11
func setupTwitterAuth() {
	// A struct in a form that github.com/joeshaw/envdecode can decode.
	var ts struct {
		ConsumerKey    string `env:"SOCIAL_POLL_TWITTER_KEY,required"`
		ConsumerSecret string `env:"SOCIAL_POLL_TWITTER_SECRET,required"`
		AccessToken    string `env:"SOCIAL_POLL_TWITTER_ACCESS_TOKEN,required"`
		AccessSecret   string `env:"SOCIAL_POLL_TWITTER_ACCESS_SECRET,required"`
	}

	if err := envdecode.Decode(&ts); err != nil {
		log.Fatalln(err)
	}
	creds = &oauth.Credentials{
		Token:  ts.AccessToken,
		Secret: ts.AccessSecret,
	}
	authClient = &oauth.Client{
		Credentials: oauth.Credentials{
			Token:  ts.ConsumerKey,
			Secret: ts.ConsumerSecret,
		},
	}
}
Example #12
func main() {
	var conf Conf
	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err)
	}

	producer, err := sarama.NewSyncProducer(strings.Split(conf.SeedBroker, ","), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err := producer.Close(); err != nil {
			log.Fatal(err)
		}
	}()

	log.Printf("Synthesizing %v event(s)", conf.NumEvents)
	err = synthesizeEvents(producer, conf.KafkaTopic, conf.NumEvents)
	if err != nil {
		log.Fatal(err)
	}
}
Example #13
func setupTwitterAuth() {
	// We don't need to use this type elsewhere,
	// so we define it inline as an anonymous struct.
	var ts struct {
		ConsumerKey    string `env:"SP_TWITTER_KEY,required"`
		ConsumerSecret string `env:"SP_TWITTER_SECRET,required"`
		AccessToken    string `env:"SP_TWITTER_ACCESSTOKEN,required"`
		AccessSecret   string `env:"SP_TWITTER_ACCESSSECRET,required"`
	}
	if err := envdecode.Decode(&ts); err != nil {
		log.Fatalln(err)
	}
	creds = &oauth.Credentials{
		Token:  ts.AccessToken,
		Secret: ts.AccessSecret,
	}
	authClient = &oauth.Client{
		Credentials: oauth.Credentials{
			Token:  ts.ConsumerKey,
			Secret: ts.ConsumerSecret,
		},
	}
}
Example #14
func NewIssConfig() (IssConfig, error) {
	config := IssConfig{}

	err := envdecode.Decode(&config)
	if err != nil {
		return config, err
	}

	if config.PemFile != "" {
		pemFileData, err := ioutil.ReadFile(config.PemFile)
		if err != nil {
			return config, fmt.Errorf("Unable to read pemfile: %s", err)
		}

		cp := x509.NewCertPool()
		if ok := cp.AppendCertsFromPEM(pemFileData); !ok {
			return config, fmt.Errorf("Error parsing PEM: %s", config.PemFile)
		}

		config.TlsConfig = &tls.Config{RootCAs: cp}
	}

	sp := make([]string, 0, 2)
	if config.LibratoSource != "" {
		sp = append(sp, config.LibratoSource)
	}
	if config.Dyno != "" {
		sp = append(sp, config.Dyno)
	}

	config.LibratoSource = strings.Join(sp, ".")

	config.MetricsRegistry = metrics.NewRegistry()

	return config, nil
}
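
The IssConfig type isn't included here. A rough sketch inferred from the fields the function touches; the envdecode tags are illustrative assumptions, and MetricsRegistry assumes the go-metrics package implied by metrics.NewRegistry().

// Hypothetical shape of IssConfig based on the fields used above.
type IssConfig struct {
	Dyno            string `env:"DYNO"`
	LibratoSource   string `env:"LIBRATO_SOURCE"`
	PemFile         string `env:"PEMFILE"`
	TlsConfig       *tls.Config
	MetricsRegistry metrics.Registry
}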
Example #15
func readConf() {
	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err.Error())
	}
}
Example #16
func main() {
	var ts struct {
		ConsumerKey    string `env:"SP_TWITTER_KEY,required"`
		ConsumerSecret string `env:"SP_TWITTER_SECRET,required"`
		AccessToken    string `env:"SP_TWITTER_ACCESSTOKEN,required"`
		AccessSecret   string `env:"SP_TWITTER_ACCESSSECRET,required"`
	}
	if err := envdecode.Decode(&ts); err != nil {
		log.Fatalln(err)
	}
	client := &http.Client{
		Transport: &http.Transport{
			Dial: func(netw, addr string) (net.Conn, error) {
				if conn != nil {
					conn.Close()
					conn = nil
				}
				netc, err := net.DialTimeout(netw, addr, 5*time.Second)
				if err != nil {
					return nil, err
				}
				conn = netc
				return netc, nil
			},
		},
	}
	creds = &oauth.Credentials{
		Token:  ts.AccessToken,
		Secret: ts.AccessSecret,
	}
	authClient = &oauth.Client{
		Credentials: oauth.Credentials{
			Token:  ts.ConsumerKey,
			Secret: ts.ConsumerSecret,
		},
	}
	twitterStopChan := make(chan struct{}, 1)
	publisherStopChan := make(chan struct{}, 1)
	stop := false
	signalChan := make(chan os.Signal, 1)
	go func() {
		<-signalChan
		stop = true
		log.Println("Stopping...")
		closeConn()
	}()
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
	votes := make(chan string) // chan for votes
	go func() {
		pub, _ := nsq.NewProducer("localhost:4150", nsq.NewConfig())
		for vote := range votes {
			pub.Publish("votes", []byte(vote)) // publish vote
		}
		log.Println("Publisher: Stopping")
		pub.Stop()
		log.Println("Publisher: Stopped")
		publisherStopChan <- struct{}{}
	}()
	go func() {
		defer func() {
			twitterStopChan <- struct{}{}
		}()
		for {
			if stop {
				log.Println("Twitter: Stopped")
				return
			}
			time.Sleep(2 * time.Second) // calm
			var options []string
			db, err := mgo.Dial("localhost")
			if err != nil {
				log.Fatalln(err)
			}
			iter := db.DB("ballots").C("polls").Find(nil).Iter()
			var p poll
			for iter.Next(&p) {
				options = append(options, p.Options...)
			}
			iter.Close()
			db.Close()

			hashtags := make([]string, len(options))
			for i := range options {
				hashtags[i] = "#" + strings.ToLower(options[i])
			}

			form := url.Values{"track": {strings.Join(hashtags, ",")}}
			formEnc := form.Encode()

			u, _ := url.Parse("https://stream.twitter.com/1.1/statuses/filter.json")
			req, err := http.NewRequest("POST", u.String(), strings.NewReader(formEnc))
			if err != nil {
				log.Println("creating filter request failed:", err)
			}
			req.Header.Set("Authorization", authClient.AuthorizationHeader(creds, "POST", u, form))
			req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
			req.Header.Set("Content-Length", strconv.Itoa(len(formEnc)))

			resp, err := client.Do(req)
			if err != nil {
				log.Println("Error getting response:", err)
				continue
			}
			if resp.StatusCode != http.StatusOK {
				// this is a nice way to see what the error actually is:
				s := bufio.NewScanner(resp.Body)
				s.Scan()
				log.Println(s.Text())
				log.Println(hashtags)
				log.Println("StatusCode =", resp.StatusCode)
				continue
			}

			reader = resp.Body
			decoder := json.NewDecoder(reader)
			for {
				var t tweet
				if err := decoder.Decode(&t); err == nil {
					for _, option := range options {
						if strings.Contains(
							strings.ToLower(t.Text),
							strings.ToLower(option),
						) {
							log.Println("vote:", option)
							votes <- option
						}
					}
				} else {
					break
				}
			}

		}

	}()

	// update by forcing the connection to close
	go func() {
		for {
			time.Sleep(1 * time.Minute)
			closeConn()
			if stop {
				break
			}
		}
	}()

	<-twitterStopChan // important to avoid panic
	close(votes)
	<-publisherStopChan

}
Example #17
func main() {
	start := time.Now()
	defer func() {
		log.Infof("Built site in %v.", time.Since(start))
	}()

	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err)
	}

	singularity.InitLog(conf.Verbose)

	// This is where we store "versioned" assets like compiled JS and CSS.
	// These assets carry a release number that we can increment and, by
	// extension, quickly invalidate.
	versionedAssetsDir := path.Join(singularity.TargetDir, "assets",
		singularity.Release)

	err = singularity.CreateOutputDirs(singularity.TargetDir)
	if err != nil {
		log.Fatal(err)
	}

	var tasks []*pool.Task

	tasks = append(tasks, pool.NewTask(func() error {
		return linkFonts()
	}))

	tasks = append(tasks, pool.NewTask(func() error {
		return linkImages()
	}))

	tasks = append(tasks, pool.NewTask(func() error {
		return assets.CompileJavascripts(
			path.Join(singularity.AssetsDir, "javascripts"),
			path.Join(versionedAssetsDir, "app.js"))
	}))

	tasks = append(tasks, pool.NewTask(func() error {
		return assets.CompileStylesheets(
			path.Join(singularity.AssetsDir, "stylesheets"),
			path.Join(versionedAssetsDir, "app.css"))
	}))

	articleTasks, err := tasksForArticles()
	if err != nil {
		log.Fatal(err)
	}
	tasks = append(tasks, articleTasks...)

	pageTasks, err := tasksForPages()
	if err != nil {
		log.Fatal(err)
	}
	tasks = append(tasks, pageTasks...)

	if !runTasks(tasks) {
		os.Exit(1)
	}
}
Example #18
func main() {
	var conf Conf
	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err)
	}

	consumer, err := sarama.NewConsumer(strings.Split(conf.SeedBroker, ","), nil)
	if err != nil {
		panic(err)
	}

	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	listEvents := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		limit := DefaultLimit
		if r.URL.Query().Get("limit") != "" {
			var err error
			limit, err = strconv.Atoi(r.URL.Query().Get("limit"))
			if err != nil {
				log.Fatalln(err)
			}
		}

		// The one problem with this system is that the client always gets a
		// duplicate event when requesting the next page. We should probably
		// skip the initial event for convenience.
		offset := sarama.OffsetOldest
		if r.URL.Query().Get("sequence") != "" {
			var err error
			offset, err = strconv.ParseInt(r.URL.Query().Get("sequence"), 10, 64)
			if err != nil {
				log.Fatalln(err)
			}
		}

		log.Printf("Handling request limit %v offset %v", limit, offset)

		partitionConsumer, err := consumer.ConsumePartition(conf.KafkaTopic, 0, offset)
		if err != nil {
			panic(err)
		}

		defer func() {
			if err := partitionConsumer.Close(); err != nil {
				log.Fatalln(err)
			}
		}()

		var events []*map[string]interface{}
		firstLoop := true
		hasMore := true

	ConsumerLoop:
		for {
			select {
			case message := <-partitionConsumer.Messages():
				// skip the first message due to overlap
				if offset != sarama.OffsetOldest && firstLoop {
					firstLoop = false
					break
				}

				var event map[string]interface{}
				err := json.Unmarshal(message.Value, &event)
				if err != nil {
					log.Fatalln(err)
				}

				// Fill the event's new `sequence` field (the public name for
				// "offset" in order to disambiguate from Stripe's old
				// offset-style pagination parameter).
				event["sequence"] = message.Offset

				events = append(events, &event)
				//log.Printf("Consumed message. Now have %v event(s).", len(events))

				// We've fulfilled the requested limit. We're done!
				if len(events) >= limit {
					break ConsumerLoop
				}

			// Unfortunately sarama doesn't currently give us a good way of
			// detecting the end of a topic, so detect the end by timing out
			// for now.
			//
			// Note that this could result in a degenerate request which is
			// very long as new messages continue to trickle in until we hit
			// max page size at a rate that's never quite enough to hit our
			// timeout.
			case <-time.After(time.Second * time.Duration(ConsumeTimeout)):
				log.Printf("Timeout. Probably at end of topic.\n")
				hasMore = false
				break ConsumerLoop
			}
		}

		page := &Page{
			Data:    events,
			HasMore: hasMore,
			Object:  "list",
			URL:     "/v1/events",
		}

		data, err := json.Marshal(page)
		if err != nil {
			log.Fatalln(err)
		}

		w.Write(data)
		log.Printf("Responded to client with %v event(s)\n", len(events))
	})

	listEventsGz := gziphandler.GzipHandler(listEvents)
	http.Handle("/v1/events", listEventsGz)

	log.Printf("Starting HTTP server")
	log.Fatal(http.ListenAndServe(":8080", nil))
}
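
The Page type written to the response isn't defined in this snippet. A minimal sketch matching the fields the handler sets; the JSON tags are assumptions chosen to mirror a Stripe-style list response.

// Hypothetical definition of Page inferred from its use above.
type Page struct {
	Data    []*map[string]interface{} `json:"data"`
	HasMore bool                       `json:"has_more"`
	Object  string                     `json:"object"`
	URL     string                     `json:"url"`
}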
Example #19
File: main.go Project: brandur/wgt2
func main() {
	var conf Conf
	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err.Error())
	}

	client := wgt2.GetSpotifyClient(conf.ClientID, conf.ClientSecret, conf.RefreshToken)

	db, err := wgt2.LoadDatabase(DBFilename)
	if err != nil {
		log.Fatal(err.Error())
	}

	for _, rawArtist := range db.RawArtists.Data {
		dbArtist := db.Artists.GetArtistByWGTName(rawArtist.WGTName)
		if dbArtist != nil {
			log.Printf("Have artist already: %v", dbArtist.Name)
			continue
		}

		searchName := rawArtist.WGTName
		if val, ok := ManualOverrides[searchName]; ok {
			if val == "" {
				log.Printf("Skipping '%v' due to override", searchName)
				continue
			}

			log.Printf("Using manual override '%v' for '%v'", val, searchName)
			searchName = val
		}

		res, err := client.Search(searchName, spotify.SearchTypeArtist)
		if err != nil {
			log.Fatal(err.Error())
		}

		if len(res.Artists.Artists) < 1 {
			log.Printf("Artist not found: %v", rawArtist.WGTName)
			continue
		}

		artist := res.Artists.Artists[0]

		dbArtist = &wgt2.Artist{
			Genres:     artist.Genres,
			ID:         string(artist.ID),
			Name:       artist.Name,
			Popularity: artist.Popularity,
			URI:        string(artist.URI),
			WGTName:    rawArtist.WGTName,
		}

		log.Printf("Found artist: %v (from: %v; popularity: %v/100)",
			artist.Name, rawArtist.WGTName, artist.Popularity)

		tracks, err := client.GetArtistsTopTracks(artist.ID, "US")
		if err != nil {
			log.Fatal(err.Error())
		}

		for _, track := range tracks {
			dbTrack := wgt2.Track{
				ID:         string(track.ID),
				Name:       track.Name,
				Popularity: track.Popularity,
				URI:        string(track.URI),
			}
			dbArtist.TopTracks = append(dbArtist.TopTracks, dbTrack)
		}

		db.Artists.AddArtist(dbArtist)
	}

	err = db.Save()
	if err != nil {
		log.Fatal(err.Error())
	}
}
Example #20
// newConfFromEnv loads configuration from the process's environment.
func newConfFromEnv() (*Conf, error) {
	conf := &Conf{}
	err := envdecode.Decode(conf)
	return conf, err
}
Example #21
func main() {
	// We require each program to have endpoints defined.
	var params struct {
		Endpoints   string `env:"SP_ENDPOINTS,required"`
		Credentials string `env:"SP_ENDPOINT_CREDENTIALS,required"`
	}

	if err := envdecode.Decode(&params); err != nil {
		log.Fatalln(err)
	}

	endpoints := strings.Split(params.Endpoints, ",")
	creds := strings.Split(params.Credentials, ",")
	if len(endpoints) != len(creds) {
		log.Fatal("Each endpoint should have a corresponding credential")
	}

	// All the OIDs we'll walk via SNMP to collect.
	oidWork := map[string]string{
		".1.3.6.1.2.1.1.5":         "sysName",
		".1.3.6.1.2.1.2.2.1.2":     "name",
		".1.3.6.1.2.1.2.2.1.10":    "ifInOctets",
		".1.3.6.1.2.1.2.2.1.16":    "ifOutOctets",
		".1.3.6.1.2.1.31.1.1.1.6":  "ifHCInOctets",
		".1.3.6.1.2.1.31.1.1.1.10": "ifHCOutOctets",
		".1.3.6.1.2.1.31.1.1.1.15": "ifHighSpeed",
	}

	// All the commands we'll run via NXAPI to collect.
	nxapiWork := map[string]string{
		"show version":            "version",
		"show interface counters": "counters",
	}

	// The main waitgroup waits for every switch
	// to finish its job.
	var mainWg sync.WaitGroup
	// The slice of waitgroups tracks the jobs that each
	// switch must process.
	wg := make([]sync.WaitGroup, len(endpoints))

	// We run in a continuous loop forever, or at least until
	// the user hits Ctrl-C or we receive an interrupt signal.
	stop := false
	signalChan := make(chan os.Signal, 1)
	interruptChan := make(chan struct{}, 1)
	go func() {
		<-signalChan
		stop = true
		log.Println("Cleaning up...")
		// Send on the channel so the select at the bottom of the loop unblocks right away instead of waiting out the full sleep.
		interruptChan <- struct{}{}
	}()
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)

	for {

		if stop {
			break
		}
		// make sure we wait for each of the switches
		mainWg.Add(len(endpoints))

		// go through each device and grab the counters.
		for i, endpoint := range endpoints {
			// figure out which method to run:
			em := strings.Split(endpoint, ":")
			if len(em) < 2 {
				fmt.Println("Invalid input: ", endpoint)
				fmt.Println("please export SP_ENDPOINTS=<name>:<method> where method is SNMP or NXAPI")
				// don't wait for me any more.
				mainWg.Add(-1)
				// go to the next switch
				continue
			}
			/* big map:
			server {
				interface {
					key : value
				}
			}
			*/
			m := make(map[string]map[string]map[string]string)
			if em[1] == "SNMP" {
				go func(e string, c string, w *sync.WaitGroup) {
					defer mainWg.Done()
					// One waitgroup increment per OID walk.
					w.Add(len(oidWork))
					// concurrently execute all of the snmp walks
					for oid, name := range oidWork {
						go func(o string, n string) {
							defer w.Done()
							walkValue(e, c, o, n, m)
						}(oid, name)
					}
					w.Wait()
					processCollectedSNMPData(m[e])
				}(em[0], creds[i], &wg[i])
			} else if em[1] == "NXAPI" {
				// Yes, this is hard to follow, so let's walk through it.
				// If this switch is an NXAPI endpoint, we kick off a goroutine
				// that runs several commands against the switch.
				go func(e string, cre string, w *sync.WaitGroup) {
					// When we finish processing all the commands against this switch, tell the main workGroup we are done.
					defer mainWg.Done()

					nxapiHash := map[string]interface{}{}
					// we need to add a waitgroup for this task.
					// This waitgroup is specific for this switch.
					// We don't want to wait for all the other switches to finish processing
					// before sending the data to the cloud.
					w.Add(len(nxapiWork))
					// Go through each command that we want to process.
					for cmd, name := range nxapiWork {
						// kick off a go routine for each of the commands we want to get
						go func(c string, outputName string) {
							// make sure we decrement the switch waitgroup.
							defer w.Done()
							// get the data.  This is where the work takes place.
							getNXAPIData(e, c, outputName, cre, nxapiHash)
						}(cmd, name)
					}
					// wait for the threads on this goroutine to finish.
					w.Wait()
					// Now we have all the data for this switch; let's process it.
					processCollectedNXAPIData(e, nxapiHash)
				}(em[0], creds[i], &wg[i])
			}
		}
		// wait for all the snmpwalks to finish.
		mainWg.Wait()

		// now sleep for a while and then run again.
		timeoutchan := make(chan bool)
		go func() {
			fmt.Println("Sleeping for 60 seconds...")
			<-time.After(60 * time.Second)
			timeoutchan <- true
		}()
		select {
		case <-timeoutchan:
			break
		case <-interruptChan:
			// close the channel so other threads will stop blocking and finish.
			close(interruptChan)
			break
		}
	}
}
Example #22
File: main.go Project: brandur/wgt2
func main() {
	var conf Conf
	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err.Error())
	}

	client := wgt2.GetSpotifyClient(conf.ClientID, conf.ClientSecret, conf.RefreshToken)

	db, err := wgt2.LoadDatabase(DBFilename)
	if err != nil {
		log.Fatal(err.Error())
	}

	user, err := client.CurrentUser()
	if err != nil {
		log.Fatal(err.Error())
	}

	var trackIDs []spotify.ID

	//
	// Main playlist
	//

	trackIDs = nil
	for _, artist := range db.Artists.Data {
		bound := TracksPerArtist
		if bound > len(artist.TopTracks) {
			bound = len(artist.TopTracks)
		}
		for _, track := range artist.TopTracks[0:bound] {
			trackIDs = append(trackIDs, spotify.ID(track.ID))
		}
	}

	dbPlaylist, err := updatePlaylist(client, user, PlaylistName, trackIDs)
	if err != nil {
		log.Fatal(err.Error())
	}
	db.Playlists.AddPlaylist(dbPlaylist)

	//
	// Popular playlist
	//

	trackIDs = nil
	for _, artist := range db.Artists.Data {
		// an arbitrary threshold
		if artist.Popularity < 20 {
			continue
		}

		bound := TracksPerArtist
		if bound > len(artist.TopTracks) {
			bound = len(artist.TopTracks)
		}
		for _, track := range artist.TopTracks[0:bound] {
			trackIDs = append(trackIDs, spotify.ID(track.ID))
		}
	}

	dbPlaylist, err = updatePlaylist(client, user, PlaylistNamePopular, trackIDs)
	if err != nil {
		log.Fatal(err.Error())
	}
	db.Playlists.AddPlaylist(dbPlaylist)

	//
	// Obscure playlist
	//

	trackIDs = nil
	for _, artist := range db.Artists.Data {
		// an arbitrary threshold
		if artist.Popularity >= 20 {
			continue
		}

		bound := TracksPerArtist
		if bound > len(artist.TopTracks) {
			bound = len(artist.TopTracks)
		}
		for _, track := range artist.TopTracks[0:bound] {
			trackIDs = append(trackIDs, spotify.ID(track.ID))
		}
	}

	dbPlaylist, err = updatePlaylist(client, user, PlaylistNameObscure, trackIDs)
	if err != nil {
		log.Fatal(err.Error())
	}
	db.Playlists.AddPlaylist(dbPlaylist)

	err = db.Save()
	if err != nil {
		log.Fatal(err.Error())
	}
}
Example #23
func main() {
	var params struct {
		Endpoints   string `env:"SP_ENDPOINTS,required"`
		Credentials string `env:"SP_ENDPOINT_CREDENTIALS,required"`
	}

	if err := envdecode.Decode(&params); err != nil {
		log.Fatalln(err)
	}

	endpoints := strings.Split(params.Endpoints, ",")
	// creds should be of the form: user:password,user:password
	cred := strings.Split(params.Credentials, ",")
	if len(endpoints) != len(cred) {
		log.Fatal("Each endpoint should have a corresponding credential")
	}

	for i, ep := range endpoints {
		up := strings.Split(cred[i], ":")
		if len(up) < 2 {
			log.Fatal("Credentials must be of form user:password")
			break
		}
		var jsonStr = []byte(`{
			"ins_api": {
				"version":       "1.0",
				"type":          "cli_show",
				"chunk":         "0",
				"sid":           "1",
				"input":         "sh interface counters",
				"output_format": "json"
			}
		}`)
		req, err := http.NewRequest("POST", "http://"+ep+"/ins", bytes.NewBuffer(jsonStr))
		if err != nil {
			log.Println("HTTP Post: ", err)
		}

		req.Header.Set("content-type", "application/json")
		req.SetBasicAuth(up[0], up[1])

		client := &http.Client{}
		resp, err := client.Do(req)
		if err != nil {
			log.Fatal("response error: ", err)
		}
		defer resp.Body.Close()

		fmt.Println("response Status: ", resp.Status)
		fmt.Println("response Headers: ", resp.Header)
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Fatal("Error reading response body: ", err)
		}
		//rr := make(map[string]interface{})
		rr := ins_api{}
		err = json.Unmarshal(body, &rr)
		if err != nil {
			log.Fatal("Error unmarshalling: ", err)
		}
		//fmt.Println("responseBody:", string(body))

		fmt.Println(rr)
	}

	log.Println("End of program")
}