// watch performs blocking Consul queries against the given key and pushes
// every new value into the returned channel until stop is closed.
func (c *consulCoordinator) watch(key string, cf consulFunc, stop chan struct{}) (chan interface{}, chan error) {
	new := make(chan interface{})
	errc := make(chan error)
	go func() {
		kv := c.client.KV()
		var lastIndex uint64
		for {
			select {
			case <-stop:
				return
			default:
				// do a blocking query
				ret, queryMeta, err := cf(kv, key, lastIndex)
				log.Println("[consul] value updated at", key, ret)
				if err != nil {
					log.Println(err)
					errc <- err
					return
				}
				if ret != nil {
					new <- ret
				}
				lastIndex = queryMeta.LastIndex
			}
		}
	}()
	return new, errc
}
// getPorts browses the local network over Bonjour for _arduino._tcp services
// and returns any discovered boards as network serial ports.
func getPorts() ([]OsSerialPort, error) {
	resolver, err := bonjour.NewResolver(nil)
	if err != nil {
		log.Println("Failed to initialize resolver:", err.Error())
		return nil, err
	}

	results := make(chan *bonjour.ServiceEntry)
	timeout := make(chan bool, 1)

	go func(exitCh chan<- bool) {
		time.Sleep(timeoutConst * time.Second)
		exitCh <- true
		close(results)
	}(resolver.Exit)

	arrPorts := []OsSerialPort{}

	go func(results chan *bonjour.ServiceEntry, exitCh chan<- bool) {
		for e := range results {
			arrPorts = append(arrPorts, OsSerialPort{
				Name:        e.AddrIPv4.String(),
				IdProduct:   e.Instance,
				IdVendor:    strings.Join(e.Text[:], " "),
				NetworkPort: true,
			})
		}
		timeout <- true
	}(results, resolver.Exit)

	err = resolver.Browse("_arduino._tcp", "", results)
	if err != nil {
		log.Println("Failed to browse:", err.Error())
		return nil, err
	}

	// wait for some kind of timeout and return arrPorts
	<-timeout
	return arrPorts, nil
}
// execCmd handles http requests received for the path "/exec"
func execCmd(w http.ResponseWriter, r *http.Request) {
	io.WriteString(w, "OK")
	go func() {
		cmd := r.URL.Query().Get("cmd")
		logrus.Info("got cmd: " + cmd)
		cmd, _ = url.QueryUnescape(cmd)
		arr := strings.Split(cmd, " ")
		logrus.Println("arr: ", arr)
		if len(arr) > 0 {
			cmd := exec.Command(arr[0], arr[1:]...)
			// Stdout buffer
			// cmdOutput := &bytes.Buffer{}
			// Attach buffer to command
			// cmd.Stdout = cmdOutput
			// Execute command
			// printCommand(cmd)
			err := cmd.Run() // will wait for command to return
			if err != nil {
				logrus.Println("Error:", err.Error())
			}
		}
	}()
}
// CreateSnapshot creates an EC2 snapshot of the given volume, optionally
// tagging it with a name and waiting for it to complete.
func (d *driver) CreateSnapshot(
	runAsync bool,
	snapshotName, volumeID, description string) ([]*core.Snapshot, error) {

	resp, err := d.ec2Instance.CreateSnapshot(volumeID, description)
	if err != nil {
		return nil, err
	}

	if snapshotName != "" {
		_, err := d.ec2Instance.CreateTags(
			[]string{resp.Id}, []ec2.Tag{{"Name", snapshotName}})
		if err != nil {
			return nil, err
		}
	}

	if !runAsync {
		log.Println("Waiting for snapshot to complete")
		err = d.waitSnapshotComplete(resp.Snapshot.Id)
		if err != nil {
			return nil, err
		}
	}

	snapshot, err := d.GetSnapshot("", resp.Snapshot.Id, "")
	if err != nil {
		return nil, err
	}

	log.Println("Created Snapshot: " + snapshot[0].SnapshotID)
	return snapshot, nil
}
// initVars parses the command line flags and initializes the protocol and
// MQTT settings used by the poller.
func initVars() {
	done = make(chan int)
	resolvAddrs()

	password := flag.String("password", "0000", "Inverter password")
	groupType := flag.String("grouptype", "USER", "'USER' or 'INSTALLER'")
	server := flag.String("server", "tcp://notify.erkkila.org:1883", "The MQTT server to connect to")
	topic := flag.String("topic", "smapoller", "The chat room to enter. default 'smapoller'")
	name := flag.String("name", "pee", "Username to be displayed")
	flag.Parse()

	log.Println("SetP:", *password)
	log.Println("Gt:", *groupType)

	protocol.SetPassword(*password)
	protocol.SetGroupType(*groupType)
	protocol.SetAppSUSyID(AppSUSyID)
	protocol.SetAppSerial(getSerial())

	mqs = GetMQTTSettings()
	mqs.SetServer(server)
	mqs.SetTopic(topic)
	mqs.SetName(name)
}
// writePacketLength computes the payload length of dst and stores it as a
// little-endian uint16 at the length offset of the packet header.
func writePacketLength(dst []byte) error {
	var LENGTH_OFFSET uint = 13
	var LENGTH_HEADER uint = 15
	var MAX_LENGTH uint = 0xFFFF

	if uint(len(dst)) < LENGTH_HEADER {
		log.Panic("Tried to set packet length to short buffer")
	}
	if uint(len(dst)) > MAX_LENGTH {
		log.Panic("Tried to set too big a packet length")
	}

	log.Println("Length of packet is:", len(dst))
	var length uint = uint(len(dst)) - (LENGTH_OFFSET + 7)
	log.Println("Setting packet length to:", length)

	l := make([]byte, 2)
	binary.LittleEndian.PutUint16(l, uint16(length))

	i := LENGTH_OFFSET
	dst[i] = l[0]
	i++
	dst[i] = l[1]
	return nil
}
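// Hypothetical usage sketch (not part of the original source): with a 26-byte
// packet buffer, writePacketLength stores 26-20 = 6 as a little-endian uint16
// at offsets 13 and 14 of the header.
func exampleWritePacketLength() {
	buf := make([]byte, 26)
	if err := writePacketLength(buf); err != nil {
		log.Println("unexpected error:", err)
	}
	log.Println("length bytes:", buf[13], buf[14]) // expect 6, 0
}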
func (d *driver) Delete(volumeID api.VolumeID) error {
	v, err := d.GetVol(volumeID)
	if err != nil {
		logrus.Println(err)
		return err
	}

	bd, ok := d.buseDevices[v.DevicePath]
	if !ok {
		err = fmt.Errorf("Cannot locate a BUSE device for %s", v.DevicePath)
		logrus.Println(err)
		return err
	}

	// Clean up buse block file and close the NBD connection.
	os.Remove(bd.file)
	bd.f.Close()
	bd.nbd.Disconnect()

	logrus.Infof("BUSE deleted volume %v at NBD device %s", volumeID, v.DevicePath)

	err = d.DeleteVol(volumeID)
	if err != nil {
		logrus.Println(err)
		return err
	}

	return nil
}
// runWatcher re-executes this binary as the handler of a "consul watch" of the
// given type and exits with the watch process's exit code.
func runWatcher(address, datacenter, watchType string) {
	consulAlert := os.Args[0]
	cmd := exec.Command(
		"consul", "watch",
		"-http-addr", address,
		"-datacenter", datacenter,
		"-type", watchType,
		consulAlert, "watch", watchType)

	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		var exitCode int
		switch err.(type) {
		case *exec.ExitError:
			exitError, _ := err.(*exec.ExitError)
			status, _ := exitError.Sys().(syscall.WaitStatus)
			exitCode = status.ExitStatus()
			log.Println("Shutting down watcher --> Exit Code: ", exitCode)
		case *exec.Error:
			exitCode = 1
			log.Println("Shutting down watcher --> Something went wrong running consul watch: ", err.Error())
		default:
			exitCode = 127
			log.Println("Shutting down watcher --> Unknown error: ", err.Error())
		}
		os.Exit(exitCode)
	} else {
		log.Printf("Execution complete.")
	}
}
// CmdList prints one page of backup jobs for the database service with the
// given label.
func CmdList(databaseName string, page, pageSize int, id IDb, is services.IServices) error {
	service, err := is.RetrieveByLabel(databaseName)
	if err != nil {
		return err
	}
	if service == nil {
		return fmt.Errorf("Could not find a service with the label \"%s\". You can list services with the \"catalyze services\" command.", databaseName)
	}

	jobs, err := id.List(page, pageSize, service)
	if err != nil {
		return err
	}
	sort.Sort(SortedJobs(*jobs))
	for _, job := range *jobs {
		logrus.Printf("%s %s (status = %s)", job.ID, job.CreatedAt, job.Status)
	}

	if len(*jobs) == pageSize && page == 1 {
		logrus.Println("(for older backups, try with --page 2 or adjust --page-size)")
	}
	if len(*jobs) == 0 && page == 1 {
		logrus.Println("No backups created yet for this service.")
	} else if len(*jobs) == 0 {
		logrus.Println("No backups found with the given parameters.")
	}
	return nil
}
// CmdUpdate checks whether a newer version of the CLI is available and, if so,
// performs the self-update.
func CmdUpdate(iu IUpdate) error {
	logrus.Println("Checking for available updates...")
	needsUpdate, err := iu.Check()
	if err != nil {
		return err
	}
	// check if we can overwrite exe
	if needsUpdate && (runtime.GOOS == "linux" || runtime.GOOS == "darwin") {
		err = verifyExeDirWriteable()
		if err != nil {
			return err
		}
	}
	if !needsUpdate {
		logrus.Println("You are already running the latest version of the Catalyze CLI")
		return nil
	}
	logrus.Printf("Version %s is available. Updating your CLI...", updater.AutoUpdater.Info.Version)
	err = iu.Update()
	if err != nil {
		return err
	}
	iu.UpdatePods()
	logrus.Printf("Your CLI has been updated to version %s", updater.AutoUpdater.Info.Version)
	return nil
}
func (sn *Node) StartVotingRound(v *Vote) error {
	log.Println(sn.Name(), "start voting round")
	sn.nRounds = sn.LastSeenRound

	// during view changes, only accept view change related votes
	if sn.ChangingView && v.Vcv == nil {
		log.Println(sn.Name(), "start signing round: changingViewError")
		return ChangingViewError
	}

	sn.nRounds++
	v.Round = sn.nRounds
	v.Index = int(atomic.LoadInt64(&sn.LastSeenVote)) + 1
	v.Count = &Count{}
	v.Confirmed = false
	// only default fill-in view numbers when not prefilled
	if v.View == 0 {
		v.View = sn.ViewNo
	}
	if v.Av != nil && v.Av.View == 0 {
		v.Av.View = sn.ViewNo + 1
	}
	if v.Rv != nil && v.Rv.View == 0 {
		v.Rv.View = sn.ViewNo + 1
	}
	if v.Vcv != nil && v.Vcv.View == 0 {
		v.Vcv.View = sn.ViewNo + 1
	}

	return sn.StartAnnouncement(
		&AnnouncementMessage{LogTest: []byte("vote round"), Round: sn.nRounds, Vote: v})
}
// prepareQuery builds the DynamoDB query against the "midmessages" table for
// the given topic and time window, resuming from lastEvaluatedKey if set.
func prepareQuery(topic *string, startTime *string, endTime *string, lastEvaluatedKey *map[string]*dynamodb.AttributeValue) *dynamodb.QueryInput {
	params := &dynamodb.QueryInput{
		TableName:         aws.String("midmessages"), // Required
		ConsistentRead:    aws.Bool(false),
		ExclusiveStartKey: prepareLastKey(lastEvaluatedKey),
		ExpressionAttributeNames: map[string]*string{
			"#ts": aws.String("timestamp"),
		},
		ExpressionAttributeValues: *prepareAttrValues(topic, startTime, endTime),
		//FilterExpression: nil,
		//IndexName: nil,
		KeyConditionExpression: prepareKeyCondition(topic, startTime, endTime),
		//KeyConditions: nil,
		// Limit: aws.Long(1),
		ProjectionExpression: aws.String("topic,#ts,sender,messageID,message,subject,notificationID"),
		//QueryFilter: nil,
		// ReturnConsumedCapacity: aws.String("TOTAL"),
		ScanIndexForward: aws.Bool(true),
	}

	log.Println("endTime: ", *endTime)
	log.Println("prepareQuery:KeyConditionExpression: ", *params.KeyConditionExpression)
	return params
}
// Wait blocks until the build is canceled, aborted, times out, or finishes,
// and records the final build state.
func (e *AbstractExecutor) Wait() error {
	e.Build.BuildState = common.Running

	buildTimeout := e.Build.Timeout
	if buildTimeout <= 0 {
		buildTimeout = common.DefaultTimeout
	}

	// Wait for signals: cancel, timeout, abort or finish
	log.Debugln(e.Config.ShortDescription(), e.Build.ID, "Waiting for signals...")
	select {
	case <-e.BuildCanceled:
		log.Println(e.Config.ShortDescription(), e.Build.ID, "Build got canceled.")
		e.Build.FinishBuild(common.Failed, "Build got canceled")

	case <-time.After(time.Duration(buildTimeout) * time.Second):
		log.Println(e.Config.ShortDescription(), e.Build.ID, "Build timed out.")
		e.Build.FinishBuild(common.Failed, "CI Timeout. Execution took longer than %d seconds", buildTimeout)

	case signal := <-e.Build.BuildAbort:
		log.Println(e.Config.ShortDescription(), e.Build.ID, "Build got aborted", signal)
		e.Build.FinishBuild(common.Failed, "Build got aborted: %v", signal)

	case err := <-e.BuildFinish:
		if err != nil {
			return err
		}
		log.Println(e.Config.ShortDescription(), e.Build.ID, "Build succeeded.")
		e.Build.FinishBuild(common.Success, "Build succeeded.")
	}
	return nil
}
// Start creates a Consul session for this coordinator (retrying with
// exponential backoff) and keeps renewing it in a background goroutine.
func (c *consulCoordinator) Start(addr net.Addr, errCh chan error) error {
	if addr == nil {
		addr = &net.TCPAddr{}
	}
	c.addr = addr

	session := c.client.Session()
	// set session to delete our keys on invalidation
	sessionOptions := &api.SessionEntry{
		Behavior:  api.SessionBehaviorDelete,
		LockDelay: c.config.LockDelay,
		TTL:       c.config.TTL,
	}

	var sessionID string
	var err error
	err = backoff.RetryNotify(func() error {
		sessionID, _, err = session.Create(sessionOptions, nil)
		return err
	}, backoff.NewExponentialBackOff(), func(err error, t time.Duration) {
		log.Println("Cannot create session, retrying in", t, ". Error:", err)
	})
	if err != nil {
		return fmt.Errorf("failed to create consul session: %v", err)
	}

	// set up a long-running goroutine for renewing the session
	c.sessionRenew = make(chan struct{})
	c.sessionID = sessionID
	log.Println("[coordinator] Coordinator ready")
	go func() {
		errCh <- session.RenewPeriodic("5s", sessionID, nil, c.sessionRenew)
	}()
	return nil
}
// write reports the queued commands back to the clients and then dispatches
// each command to the port's buffered or unbuffered send channel.
func write(wr writeRequest, id string) {
	cmds, idArr, bufTypeArr := createCommands(wr, id)

	qr := qReport{
		Cmd: "Queued",
		//Type: bufTypeArr,
		Ids:  idArr,
		D:    cmds,
		QCnt: wr.p.itemsInBuffer,
		Port: wr.p.portConf.Name,
	}
	json, _ := json.Marshal(qr)
	h.broadcastSys <- json

	// now send off the commands to the appropriate channel
	for index, cmdToSendToChannel := range cmds {
		//cmdIdCtr++
		//cmdId := "fakeid-" + strconv.Itoa(cmdIdCtr)
		cmdId := idArr[index]
		if bufTypeArr[index] == "Buf" {
			log.Println("Send was normal send, so sending to wr.p.sendBuffered")
			wr.p.sendBuffered <- Cmd{cmdToSendToChannel, cmdId, false, false}
		} else {
			log.Println("Send was sendnobuf, so sending to wr.p.sendNoBuf")
			wr.p.sendNoBuf <- Cmd{cmdToSendToChannel, cmdId, true, false}
		}
	}

	// garbage collect
	if *gcType == "max" {
		debug.FreeOSMemory()
	}
}
func (d *driver) DetachVolume(
	runAsync bool,
	volumeID, blank string, force bool) error {

	if volumeID == "" {
		return errors.ErrMissingVolumeID
	}

	volumes, err := d.getVolume(volumeID, "")
	if err != nil {
		return err
	}

	if volumes[0].Status == "available" {
		return nil
	}

	_, err = d.ec2Instance.DetachVolume(volumeID, force)
	if err != nil {
		return err
	}

	if !runAsync {
		log.Println("Waiting for volume detachment to complete")
		err = d.waitVolumeDetach(volumeID)
		if err != nil {
			return err
		}
	}

	log.Println("Detached volume", volumeID)
	return nil
}
func GetGodos() []model.Godo {
	var godos []model.Godo

	// Load all godos from the database
	rows, err := db.Query("SELECT ID, Title, Content FROM godo ORDER BY Rank")
	if err != nil {
		log.Println(err)
	}
	// Ensure that the database connection is closed
	defer rows.Close()

	for rows.Next() {
		var id int
		var title string
		var content string

		// Fill variables with data from row
		if err := rows.Scan(&id, &title, &content); err != nil {
			log.Println(err)
		}

		// Create a Godo from the extracted data
		godo := model.Godo{
			ID:          id,
			Title:       title,
			ContentMD:   content,
			ContentHTML: template.HTML(string(md.MarkdownCommon([]byte(content))))}

		godos = append(godos, godo)
	}
	return godos
}
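// Hypothetical usage sketch (assumed caller, not in the original source): list
// every stored godo by ID and title, assuming the package-level db connection
// has already been opened.
func exampleListGodos() {
	for _, g := range GetGodos() {
		log.Println(g.ID, g.Title)
	}
}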
// PopulateTemplates parses all .html files in the templates folder and caches
// the index template.
func PopulateTemplates() {
	templateDir := "./templates/"

	// Get all files in the templates folder
	files, err := ioutil.ReadDir(templateDir)
	if err != nil {
		log.Println("Error populating templates")
	}

	var templateFiles []string
	// Iterate over all files to filter for .html
	for _, file := range files {
		filename := file.Name()
		if strings.HasSuffix(filename, ".html") {
			templateFiles = append(templateFiles, templateDir+filename)
		}
	}

	templates, err := template.ParseFiles(templateFiles...)
	if err != nil {
		log.Println("Error populating templates")
	}
	templateIndex = templates.Lookup("index.html")
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	flag.Parse()

	if *profile != "" {
		f, err := os.Create(*profile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	if *authTokenFlag == "" {
		*authTokenFlag = "public"
		log.Println("Using default Token", *authTokenFlag)
	}

	if *certFile == "" || *keyFile == "" {
		log.Error("need cert file and key file .pem")
		return
	}

	th := throttled.RateLimit(throttled.PerMin(30),
		&throttled.VaryBy{RemoteAddr: true},
		store.NewMemStore(100),
	)

	mux := http.NewServeMux()
	mux.Handle("/remoton/", http.StripPrefix("/remoton",
		remoton.NewServer(func(authToken string, r *http.Request) bool {
			return authToken == *authTokenFlag
		}, func() string {
			return uuid.NewV4().String()[0:8]
		})))

	log.Println("Listen at HTTPS ", *listenAddr)
	sSecure := &http.Server{
		Addr:    *listenAddr,
		Handler: th.Throttle(mux),
	}

	host, port, err := net.SplitHostPort(*listenAddr)
	if err != nil {
		log.Fatal(err)
	}
	iport, err := strconv.Atoi(port)
	if err != nil {
		panic(err)
	}

	// The insecure listener defaults to the port just below the secure one
	listenInsecureAddr := net.JoinHostPort(host, strconv.Itoa(iport-1))
	log.Println("Listen at HTTP ", listenInsecureAddr)
	sInsecure := &http.Server{
		Addr:    listenInsecureAddr,
		Handler: th.Throttle(mux),
	}

	go sInsecure.ListenAndServe()
	log.Fatal(sSecure.ListenAndServeTLS(*certFile, *keyFile))
}
// findLocationByCoordinates reverse-geocodes a coordinate string and maps the
// resulting country back to a known Location.
func findLocationByCoordinates(loc string) *Location {
	p, err := parseCoordinate(loc)
	if err != nil {
		log.Println(err)
		return nil
	}

	addr, err := geocoder.ReverseGeocode(p)
	if err != nil {
		log.Println(err)
		return nil
	}

	tokens := strings.Split(addr, ",")
	country := strings.ToLower(tokens[len(tokens)-1])

	if l, ok := countryTrie.Find(country).(*Location); ok && l != nil {
		l.Address = addr
		return l
	}
	if l := countryCodes[country]; l != nil {
		l.Address = addr
		return l
	}
	return nil
}
// Create provisions a new NFS-backed volume: a per-volume directory, an
// optional seed, and a block file truncated to the requested size.
func (d *driver) Create(locator api.VolumeLocator, source *api.Source, spec *api.VolumeSpec) (api.VolumeID, error) {
	volumeID := uuid.New()
	volumeID = strings.TrimSuffix(volumeID, "\n")

	// Create a directory on the NFS server with this UUID.
	volPath := path.Join(nfsMountPath, volumeID)
	err := os.MkdirAll(volPath, 0744)
	if err != nil {
		log.Println(err)
		return api.BadVolumeID, err
	}

	if source != nil {
		if len(source.Seed) != 0 {
			seed, err := seed.New(source.Seed, spec.ConfigLabels)
			if err != nil {
				log.Warnf("Failed to initialize seed from %q : %v", source.Seed, err)
				return api.BadVolumeID, err
			}
			err = seed.Load(volPath)
			if err != nil {
				log.Warnf("Failed to seed from %q to %q: %v", source.Seed, nfsMountPath, err)
				return api.BadVolumeID, err
			}
		}
	}

	f, err := os.Create(path.Join(nfsMountPath, string(volumeID)+nfsBlockFile))
	if err != nil {
		log.Println(err)
		return api.BadVolumeID, err
	}
	defer f.Close()

	err = f.Truncate(int64(spec.Size))
	if err != nil {
		log.Println(err)
		return api.BadVolumeID, err
	}

	v := &api.Volume{
		ID:         api.VolumeID(volumeID),
		Source:     source,
		Locator:    locator,
		Ctime:      time.Now(),
		Spec:       spec,
		LastScan:   time.Now(),
		Format:     "nfs",
		State:      api.VolumeAvailable,
		Status:     api.Up,
		DevicePath: path.Join(nfsMountPath, string(volumeID)+nfsBlockFile),
	}

	err = d.CreateVol(v)
	if err != nil {
		return api.BadVolumeID, err
	}
	return v.ID, err
}
func CmdClear(privateKey, session, environments, defaultEnv, pods bool, settings *models.Settings) error {
	if defaultEnv {
		logrus.Warnln("The \"--default\" flag has been deprecated! It will be removed in a future version.")
	}
	if privateKey {
		settings.PrivateKeyPath = ""
	}
	if session {
		settings.SessionToken = ""
		settings.UsersID = ""
	}
	if environments {
		settings.Environments = map[string]models.AssociatedEnv{}
	}
	if defaultEnv {
		settings.Default = ""
	}
	if pods {
		settings.Pods = &[]models.Pod{}
	}
	config.SaveSettings(settings)
	if !privateKey && !session && !environments && !defaultEnv && !pods {
		logrus.Println("No settings were specified. To see available options, run \"catalyze clear --help\"")
	} else {
		logrus.Println("All specified settings have been cleared")
	}
	return nil
}
// saveFileonTempDir writes the given sketch to a freshly created temporary
// directory and returns the full path of the saved file.
func saveFileonTempDir(filename string, sketch io.Reader) (path string, err error) {
	// create tmp dir
	tmpdir, err := ioutil.TempDir("", "arduino-create-agent")
	if err != nil {
		return "", errors.New("Could not create temp directory to store downloaded file. Do you have permissions?")
	}
	filename, _ = filepath.Abs(tmpdir + "/" + filename)

	output, err := os.Create(filename)
	if err != nil {
		log.Println("Error while creating", filename, "-", err)
		return filename, err
	}
	defer output.Close()

	n, err := io.Copy(output, sketch)
	if err != nil {
		log.Println("Error while copying", err)
		return filename, err
	}
	log.Println(n, "bytes saved")
	return filename, nil
}
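// Hypothetical usage sketch (assumed caller, not in the original source): save
// an in-memory sketch to a temp directory and report where it landed.
func exampleSaveSketch() {
	data := strings.NewReader("void setup() {}\nvoid loop() {}\n")
	savedPath, err := saveFileonTempDir("blink.ino", data)
	if err != nil {
		log.Println("could not save sketch:", err)
		return
	}
	log.Println("sketch saved to", savedPath)
}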
func (driver *Driver) AttachVolume(runAsync bool, volumeID, instanceID string) ([]*storage.VolumeAttachment, error) {
	if volumeID == "" {
		return nil, ErrMissingVolumeID
	}

	nextDeviceName, err := driver.GetDeviceNextAvailable()
	if err != nil {
		return nil, err
	}

	_, err = driver.EC2Instance.AttachVolume(volumeID, instanceID, nextDeviceName)
	if err != nil {
		return nil, err
	}

	if !runAsync {
		log.Println("Waiting for volume attachment to complete")
		err = driver.waitVolumeAttach(volumeID, instanceID)
		if err != nil {
			return nil, err
		}
	}

	volumeAttachment, err := driver.GetVolumeAttach(volumeID, instanceID)
	if err != nil {
		return nil, err
	}

	log.Println(fmt.Sprintf("Attached volume %s to instance %s", volumeID, instanceID))
	return volumeAttachment, nil
}
/*
RemoveFile removes a version, file or stash from the stash.
format: {stashid}/<filename><:version>
will remove everything if asked
*/
func (self *Meta) RemoveFile(stash_node string) {
	node, file, exact := meta.Lookup(stash_node)
	log.Debugln("\n\n*** \nFound: ", file, "\n", exact, "\n***\n\n")

	if node != nil {
		if file != nil {
			if exact {
				log.Println("Removing file: ", file.Name, " version: ", file.Version, " from stash: ", node.Id)
				self.pullFromFiles(node, file)
				return
			}
			log.Println("Didn't find exact file, should I remove version:", file.Version, " [yes/No]")
			if Ask("no") {
				self.pullFromFiles(node, file)
			}
			return
		}

		log.Println("Asked to remove entire stash... are you sure? [yes/No]")
		if Ask("no") {
			self.pullFromFiles(node, nil)
			os.Remove(config.Stash_loc + "/" + node.Id)
		}
		return
	}

	log.Println("Unable to find file to remove")
}
// spawn spawns the given filename in background using syscall
func spawn(name string) {
	filepath := path.Join("/init/services", name)
	stdinpath := path.Join("/logs/", name+".stdin")
	stdoutpath := path.Join("/logs/", name+".stdout")
	stderrpath := path.Join("/logs/", name+".stderr")

	os.MkdirAll(path.Dir(stdinpath), 0777)
	os.MkdirAll(path.Dir(stdoutpath), 0777)
	os.MkdirAll(path.Dir(stderrpath), 0777)

	// Open files for stdin, stdout, stderr
	fstdin, err := os.Create(stdinpath)
	if err != nil {
		log.Println("waat", err)
	}
	fstdout, err := os.Create(stdoutpath)
	if err != nil {
		log.Println("waat", err)
	}
	fstderr, err := os.Create(stderrpath)
	if err != nil {
		log.Println("waat", err)
	}

	procAttr := &syscall.ProcAttr{
		Dir:   "/",
		Env:   []string{"MYVAR=345"},
		Files: []uintptr{fstdin.Fd(), fstdout.Fd(), fstderr.Fd()},
		Sys:   nil,
	}

	pid, err := syscall.ForkExec(filepath, nil, procAttr)
	if err != nil {
		log.WithFields(log.Fields{
			"service": filepath,
			"error":   err,
		}).Error("Could not start service.")
	} else {
		log.WithFields(log.Fields{
			"service": filepath,
			"pid":     pid,
		}).Info("Started service successfully")
	}

	log.Info("Waiting for 3 seconds")
	time.Sleep(3 * time.Second)

	a, err1 := ioutil.ReadFile(stdoutpath)
	b, err2 := ioutil.ReadFile(stderrpath)
	if err1 != nil || err2 != nil {
		log.Error("Could not read", err1, err2)
	} else {
		log.WithFields(log.Fields{
			"service": name,
			"stdout":  string(a),
			"stderr":  string(b),
		}).Info("Service ended.")
	}
}
// addToCorpus stores each seed in Redis: all but the last token form the key,
// and the last token is added to that key's set of possible follow-ups.
func addToCorpus(seeds [][]string) {
	corpus := pool.Get()
	defer corpus.Close()

	for i, seed := range seeds {
		cut := len(seed) - 1
		key := strings.Join(seed[:cut], separator)
		value := seed[cut:][0]

		_, err := corpus.Do("SADD", key, value)
		if err != nil {
			redisErr(err)
			return
		}

		if config.Debug {
			log.Println("seed #" + fmt.Sprint(i) + ":\t" + dump(seed))
			chainValues, err := redis.Strings(corpus.Do("SMEMBERS", key))
			if err != nil {
				redisErr(err)
				return
			}
			log.Println("corpus #" + fmt.Sprint(i) + ":\t" + dump(chainValues))
		}
	}
}
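// Hypothetical usage sketch (assumed caller, not in the original source): each
// seed is a token sequence; the leading tokens become the key and the final
// token is the value stored under it.
func exampleAddToCorpus() {
	seeds := [][]string{
		{"the", "quick", "brown"},
		{"quick", "brown", "fox"},
	}
	addToCorpus(seeds)
}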
// A propose for a view change would come on current view + sth
// when we receive view change message on a future view,
// we must be caught up, create that view and apply actions on it
func (sn *Node) Propose(view int, am *AnnouncementMessage, from string) error {
	log.Println(sn.Name(), "GOT ", "Propose", am)
	if err := sn.SetupProposal(view, am, from); err != nil {
		return err
	}

	if err := sn.setUpRound(view, am); err != nil {
		return err
	}
	// log.Println(sn.Name(), "propose on view", view, sn.HostListOn(view))
	sn.Rounds[am.Round].Vote = am.Vote

	// Inform all children of proposal
	messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view))
	for i := range messgs {
		sm := SigningMessage{
			Type:         Announcement,
			View:         view,
			LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)),
			Am:           am}
		messgs[i] = &sm
	}

	ctx := context.TODO()
	//ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond)
	if err := sn.PutDown(ctx, view, messgs); err != nil {
		return err
	}

	if len(sn.Children(view)) == 0 {
		log.Println(sn.Name(), "no children")
		sn.Promise(view, am.Round, nil)
	}
	return nil
}
// StartPoller loops forever, checking every known inverter each 30 seconds.
func (i *Inverters) StartPoller() {
	log.Println("Beginning poller")
	for {
		inverters.Lock()
		var keys []string
		for k := range inverters.m {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			// log.Println("polling:key:", k, "Value:", inverters.m[k])
			if inverters.m[k].authed == false {
				log.Println("Skipping poll of unauthed inverter:", inverters.m[k])
			}
		}
		// Unlock explicitly: a deferred unlock would never run inside this
		// endless loop and would deadlock the next iteration.
		inverters.Unlock()
		time.Sleep(30 * time.Second)
	}
}
// fetch performs a blocking Consul query on the publishers_leader key and
// updates the handler's view of the current leader.
func (gh *groupHandler) fetch() error {
	key := fmt.Sprintf("dagger/%s/publishers_leader", gh.compID)
	kv := gh.c.client.KV()

	qOpts := &api.QueryOptions{WaitIndex: gh.lastIndex}
	// determine if we should do a short poll in case a leader's not chosen yet
	gh.RLock()
	if gh.currentLeader == "" {
		qOpts.WaitTime = time.Second
	}
	gh.RUnlock()

	pair, queryMeta, err := kv.Get(key, qOpts)
	log.Println("[coordinator][groupHandler] Fetch returned new data")
	if err != nil {
		log.Println("FETCH ERROR")
		return err
	}

	gh.Lock()
	gh.lastIndex = queryMeta.LastIndex
	if pair == nil || pair.Session == "" {
		gh.currentLeader = ""
		gh.areWeLeader = false
	} else {
		gh.currentLeader = string(pair.Value)
		gh.areWeLeader = (gh.currentLeader == gh.c.addr.String())
	}
	gh.Unlock()

	log.Println("[coordinator] New leader:", gh.currentLeader)
	return nil
}