func (c *consulCoordinator) EnsurePublisherNum(topic StreamID, n int, stop chan struct{}) chan error {
	prefix := fmt.Sprintf("dagger/publishers/%s", topic)
	lastNumPublishers := -1
	kv := c.client.KV()
	new, errc := c.watchSet(prefix, nil)
	go func() {
		for {
			select {
			case <-stop:
				return
			case keys := <-new:
				// if there are no publishers registered, post a new job
				if len(keys) != lastNumPublishers && len(keys) < 2 {
					log.Printf("[coordinator] Number of publishers of %s is %d, posting a job.", topic, len(keys))
					// log.Println("Publishers: ", keys)
					pair := &api.KVPair{
						Key: taskPrefix + string(topic),
					}
					_, err := kv.Put(pair, nil)
					if err != nil {
						errc <- fmt.Errorf("consul error: %s", err)
					}
				} else { // FIXME: do this more elegantly
					if len(keys) == 2 {
						log.Printf("[coordinator] Number of publishers of %s is %d, not posting a job.", topic, len(keys))
						// log.Println("Publishers: ", keys)
					}
				}
				lastNumPublishers = len(keys)
			}
		}
	}()
	return errc
}
func loadAsset(path, defaultValue string) string {
	devPath := fmt.Sprintf(
		"%s/src/github.com/emccode/rexray/daemon/module/admin/html/%s",
		os.Getenv("GOPATH"),
		path)
	if util.FileExists(devPath) {
		v, _ := ioutil.ReadFile(devPath)
		log.Printf("Loaded %s from %s\n", path, devPath)
		return string(v)
	}

	exeDir, _, _ := util.GetThisPathParts()
	relPath := fmt.Sprintf(
		"%s/html/%s",
		exeDir,
		path)
	if util.FileExists(relPath) {
		v, _ := ioutil.ReadFile(relPath) // was devPath: read the path that was just checked
		log.Printf("Loaded %s from %s\n", path, relPath)
		return string(v)
	}

	return defaultValue
}
// StatsListener listens for requests to serve server stats.
func (s *StatsMgr) StatsListener(ip net.IP, port int, ctrlChan chan int) {
	s.wg.Add(1)
	go func() {
		r := mux.NewRouter()
		r.HandleFunc("/api/v1/stats/all", s.StatsAllJSON)
		svr := &http.Server{}
		svr.Handler = r
		tcpaddr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(ip.String(), strconv.Itoa(port)))
		if err != nil {
			log.Error("Stats Listener ", err)
			s.wg.Done()
			return
		}
		l, err := net.ListenTCP("tcp", tcpaddr)
		if err != nil {
			log.Error("Stats Listener ", err)
			s.wg.Done()
			return
		}
		log.Printf("%s now listening on %s for incoming connections", strings.Join([]string{AppName, "Stats Listener"}, " "), tcpaddr.String())
		// Watch the control channel in its own goroutine: svr.Serve blocks
		// until the listener closes, so a shutdown loop placed after it on
		// the same goroutine would never run.
		go func() {
			for item := range ctrlChan {
				if item == -1 {
					if err := l.Close(); err != nil {
						log.Println(err)
					}
					s.wg.Done()
					log.Printf("%s shutting down", strings.Join([]string{AppName, "Stats Listener"}, " "))
					return
				}
			}
		}()
		if err := svr.Serve(l); err != nil {
			log.Errorln(err)
		}
	}()
}
// Check and update if a newer source exists for the package.
func (b *Builder) updatePkgSrc(pkg *SrcPkg) (*SrcPkg, error) {
	p := pkg.PKGBUILD
	if len(p.Pkgnames) > 1 || p.Pkgnames[0] != p.Pkgbase {
		log.Printf("Checking for new version of %s:(%s)", p.Pkgbase, strings.Join(p.Pkgnames, ", "))
	} else {
		log.Printf("Checking for new version of %s", p.Pkgbase)
	}
	err := runCmd(pkg.Path, nil, "makepkg", "--nobuild", "--nodeps", "--noprepare", "--noconfirm")
	if err != nil {
		return nil, err
	}
	cmd := exec.Command("mksrcinfo")
	cmd.Dir = pkg.Path
	err = cmd.Run()
	if err != nil {
		return nil, err
	}
	filePath := path.Join(pkg.Path, ".SRCINFO")
	pkgb, err := pkgbuild.ParseSRCINFO(filePath)
	if err != nil {
		return nil, err
	}
	pkg.PKGBUILD = pkgb
	return pkg, nil
}
func CmdDeploy(svcName, target string, iw IWorker, is services.IServices, ij jobs.IJobs) error {
	service, err := is.RetrieveByLabel(svcName)
	if err != nil {
		return err
	}
	if service == nil {
		return fmt.Errorf("Could not find a service with the label \"%s\". You can list services with the \"catalyze services list\" command.", svcName)
	}
	logrus.Printf("Initiating a worker for service %s (procfile target = \"%s\")", svcName, target)
	workers, err := iw.Retrieve(service.ID)
	if err != nil {
		return err
	}
	if _, ok := workers.Workers[target]; ok {
		logrus.Printf("Worker with target %s for service %s is already running, deploying another worker", target, svcName)
	}
	workers.Workers[target]++
	err = iw.Update(service.ID, workers)
	if err != nil {
		return err
	}
	err = ij.DeployTarget(target, service.ID)
	if err != nil {
		return err
	}
	logrus.Printf("Successfully deployed a worker for service %s with target %s", svcName, target)
	return nil
}
// HandleError parses an AWS error and exits the program
func HandleError(err error) {
	if err == nil {
		return
	}
	if awsErr, ok := err.(awserr.Error); ok {
		log.Print("Code: " + awsErr.Code())
		log.Print("Message: " + awsErr.Message())
		if awsErr.OrigErr() != nil {
			log.Printf("Original Error: %v", awsErr.OrigErr())
		}
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			log.Printf("Status Code: %d", reqErr.StatusCode())
			if reqErr.RequestID() != "" {
				log.Print("Request ID: " + reqErr.RequestID())
			}
		}
	} else {
		log.Print(err.Error())
	}
	os.Exit(1)
}
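// A minimal usage sketch of HandleError, assuming the aws-sdk-go v1 S3 client
// (the service, call, and region are illustrative, not from the source;
// imports: github.com/aws/aws-sdk-go/aws, .../aws/session, .../service/s3).
func exampleHandleErrorUsage() {
	sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
	svc := s3.New(sess)
	// Any SDK error funnels through HandleError, which prints the structured
	// awserr fields above and exits with status 1; it is a no-op on nil.
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	HandleError(err)
	for _, b := range out.Buckets {
		fmt.Println(*b.Name)
	}
}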
func spProgramRW(portname string, boardname string, filePath string, commandline string, extraInfo boardExtraInfo) {
	compiling = true
	defer func() {
		time.Sleep(1500 * time.Millisecond)
		compiling = false
	}()
	var err error
	if extraInfo.Network {
		err = spProgramNetwork(portname, boardname, filePath, extraInfo.Auth)
		if err != nil {
			// no http method available, try ssh upload
			err = spProgramSSHNetwork(portname, boardname, filePath, commandline, extraInfo.Auth)
		}
	} else {
		err = spProgramLocal(portname, boardname, filePath, commandline, extraInfo)
	}
	if err != nil {
		log.Printf("Command finished with error: %v", err)
		mapD := map[string]string{"ProgrammerStatus": "Error", "Msg": "Could not program the board"}
		mapB, _ := json.Marshal(mapD)
		h.broadcastSys <- mapB
	} else {
		log.Printf("Finished without error. Good stuff")
		mapD := map[string]string{"ProgrammerStatus": "Done", "Flash": "Ok"}
		mapB, _ := json.Marshal(mapD)
		h.broadcastSys <- mapB
		// analyze stdin
	}
}
func CmdBackup(databaseName string, skipPoll bool, id IDb, is services.IServices, ij jobs.IJobs) error {
	service, err := is.RetrieveByLabel(databaseName)
	if err != nil {
		return err
	}
	if service == nil {
		return fmt.Errorf("Could not find a service with the label \"%s\". You can list services with the \"catalyze services\" command.", databaseName)
	}
	job, err := id.Backup(service)
	if err != nil {
		return err
	}
	logrus.Printf("Backup started (job ID = %s)", job.ID)
	if !skipPoll {
		// all because logrus treats print, println, and printf the same
		logrus.StandardLogger().Out.Write([]byte("Polling until backup finishes."))
		status, err := ij.PollTillFinished(job.ID, service.ID)
		if err != nil {
			return err
		}
		job.Status = status
		logrus.Printf("\nEnded in status '%s'", job.Status)
		err = id.DumpLogs("backup", job, service)
		if err != nil {
			return err
		}
		if job.Status != "finished" {
			return fmt.Errorf("Job finished with invalid status %s", job.Status)
		}
	}
	logrus.Printf("You can download your backup with the \"catalyze db download %s %s ./output_file_path\" command", databaseName, job.ID)
	return nil
}
func setupSSHListener(port string, hostKey string) (ssh.ServerConfig, net.Listener) {
	sshConfig := &ssh.ServerConfig{
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			// SplitHostPort handles IPv6 addresses correctly; slicing at the
			// first ":" does not.
			ip, _, _ := net.SplitHostPort(c.RemoteAddr().String())
			log.Printf("SSH connection from ip=[%s], username=[%s], password=[%s], version=[%s]", ip, c.User(), pass, c.ClientVersion())
			return nil, fmt.Errorf("invalid credentials")
		},
	}
	privateBytes, err := ioutil.ReadFile(hostKey)
	if err != nil {
		log.Fatalf("Failed to load private key %s. Run make gen_ssh_key %s", hostKey, hostKey)
	}
	private, err := ssh.ParsePrivateKey(privateBytes)
	if err != nil {
		log.Fatal("Failed to parse private key")
	}
	sshConfig.AddHostKey(private)
	portComplete := fmt.Sprintf(":%s", port)
	listener, err := net.Listen("tcp4", portComplete)
	if err != nil {
		log.Fatalf("failed to listen on *:%s", port)
	}
	log.Printf("listening on %s", port)
	return *sshConfig, listener
}
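// A sketch of an accept loop driving setupSSHListener (hypothetical wiring,
// not from the source). Because the PasswordCallback above always returns an
// error, every handshake fails after the credentials are logged, which makes
// the function usable as a credential-logging honeypot.
func exampleRunSSHListener() {
	config, listener := setupSSHListener("2222", "./host_key")
	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Printf("accept failed: %v", err)
			continue
		}
		go func(c net.Conn) {
			defer c.Close()
			// The handshake invokes PasswordCallback, which logs the attempt
			// and rejects it, so NewServerConn always returns an error here.
			if _, _, _, err := ssh.NewServerConn(c, &config); err != nil {
				log.Printf("handshake ended: %v", err)
			}
		}(conn)
	}
}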
func LineByLinePipe(cb func(string)) io.Writer {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	pipeId := RandomNumericString(5)
	go func() {
		log.Printf("[pipe:%s] Pipe created", pipeId)
		reader := bufio.NewReader(r)
		msg := ""
		for {
			bytes, ispr, err := reader.ReadLine()
			if err != nil && err != io.EOF {
				panic(err)
			}
			msg += string(bytes)
			// ReadLine returns isPrefix=true when the line exceeded the
			// buffer; keep accumulating until the line is complete.
			if ispr {
				continue
			}
			line := msg
			msg = ""
			cb(line)
			if err == io.EOF {
				log.Printf("[pipe:%s] Pipe closed", pipeId)
				break
			}
		}
	}()
	return w
}
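// A short usage sketch for LineByLinePipe (the command is illustrative): the
// returned io.Writer can be handed to anything that streams output, and the
// callback fires once per complete line; partial lines are buffered until the
// newline arrives.
func exampleLineByLinePipeUsage() {
	cmd := exec.Command("ping", "-c", "3", "example.com")
	cmd.Stdout = LineByLinePipe(func(line string) {
		log.Printf("ping says: %s", line)
	})
	if err := cmd.Run(); err != nil {
		log.Printf("command failed: %v", err)
	}
}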
func main() {
	kingpin.Parse()
	me := ec2metadata.New(session.New(), &aws.Config{})
	region, err := me.Region()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	cw := cloudwatch.New(session.New(&aws.Config{Region: aws.String(region)}))
	as := autoscaling.New(session.New(&aws.Config{Region: aws.String(region)}))

	// Get the name of this instance.
	instance, err := me.GetMetadata("instance-id")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	log.Printf("Instance: %s", instance)

	// Check if this instance is in an auto scaling group.
	gps, err := as.DescribeAutoScalingInstances(&autoscaling.DescribeAutoScalingInstancesInput{
		InstanceIds: []*string{
			aws.String(instance),
		},
	})
	if err == nil && len(gps.AutoScalingInstances) > 0 {
		group = *gps.AutoScalingInstances[0].AutoScalingGroupName
		log.Printf("AutoScaling group: %s", group)
	}

	// Loop and send to the backend.
	limiter := time.Tick(time.Second * 60)
	for {
		<-limiter

		// Submit all the values.
		for _, mn := range strings.Split(*cliMetrics, ",") {
			m, err := metric.New(mn)
			if err != nil {
				log.Printf("Cannot find metric: %s", mn) // was `"...%s" + mn`, which never formatted the name
				continue
			}
			v, err := m.Value()
			if err != nil {
				log.Printf("Cannot get metric: %s", mn)
				continue // don't send a zero value for a failed read
			}

			// Send the instance metrics.
			Send(cw, "InstanceId", instance, m.Name(), v)

			// Send the autoscaling.
			if group != "" {
				Send(cw, "AutoScalingGroupName", group, m.Name(), v)
			}
		}
	}
}
func (r *Retain) UnmarshalRQL(data interface{}) error {
	log.Printf("data %v %T", data, data)
	//var rData map[string]interface{}
	rData, ok := data.(map[string]interface{})
	if !ok {
		return fmt.Errorf("pseudo-type Retain object is not valid")
	}
	log.Printf("[DEBUG] %T - %v\n", rData, rData)
	//if rData, ok := rData.(*retain); ok {
	topic, ok := rData["topic"].(string)
	if !ok {
		return fmt.Errorf("Topic is not valid string")
	}
	r.Topic = topic
	newMsg := &proto.Publish{}
	r.Msg = newMsg
	msg, ok := rData["msg"].(map[string]interface{})
	if !ok {
		return fmt.Errorf("Msg is not valid map[string]interface{} type")
	}
	//r.Msg = &msg
	log.Printf("msg %T - %v\n", msg, msg)
	log.Printf("msg %T - %v\n", msg["TopicName"], msg["TopicName"])
	r.Msg.TopicName = msg["TopicName"].(string)
	qos := msg["QosLevel"].(float64)
	switch qos {
	case 0:
		r.Msg.QosLevel = proto.QosAtMostOnce
	case 1:
		r.Msg.QosLevel = proto.QosAtLeastOnce
	case 2:
		r.Msg.QosLevel = proto.QosExactlyOnce
	default:
		return fmt.Errorf("QosLevel is not valid QosLevel type")
	}
	r.Msg.DupFlag = msg["DupFlag"].(bool)
	r.Msg.Retain = msg["Retain"].(bool)
	msgID := msg["MessageId"].(float64)
	r.Msg.MessageId = uint16(msgID)
	log.Printf("payload %v", msg["Payload"])
	payloadArray := msg["Payload"].([]uint8)
	w := bytes.NewBuffer(payloadArray)
	pay := &proto.BytesPayload{}
	r.Msg.Payload = pay
	r.Msg.Payload.WritePayload(w)
	return nil
}
func New(quiet bool) *SysInfo {
	sysInfo := &SysInfo{}
	if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil {
		if !quiet {
			log.Printf("WARNING: %s\n", err)
		}
	} else {
		_, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes"))
		_, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes"))
		sysInfo.MemoryLimit = err1 == nil && err2 == nil
		if !sysInfo.MemoryLimit && !quiet {
			log.Printf("WARNING: Your kernel does not support cgroup memory limit.")
		}
		_, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes"))
		sysInfo.SwapLimit = err == nil
		if !sysInfo.SwapLimit && !quiet {
			log.Printf("WARNING: Your kernel does not support cgroup swap limit.")
		}
	}

	// Check if AppArmor seems to be enabled on this system.
	if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) {
		sysInfo.AppArmor = false
	} else {
		sysInfo.AppArmor = true
	}
	return sysInfo
}
func (f *SFiles) Save(output string, force bool, file *models.ServiceFile) error {
	filePerms, err := strconv.ParseUint(file.Mode, 8, 32)
	if err != nil {
		filePerms = 0644
	}
	var wr io.Writer
	if output != "" {
		if force {
			os.Remove(output)
		}
		outFile, err := os.OpenFile(output, os.O_CREATE|os.O_RDWR, os.FileMode(filePerms))
		if err != nil {
			logrus.Printf("Warning! Could not apply %s file permissions. Attempting to apply defaults %s", fileModeToRWXString(filePerms), fileModeToRWXString(uint64(0644)))
			outFile, err = os.OpenFile(output, os.O_CREATE|os.O_RDWR, 0644)
			if err != nil {
				return fmt.Errorf("Could not open %s for writing: %s", output, err.Error())
			}
		}
		defer outFile.Close()
		wr = outFile
	} else {
		logrus.Printf("Mode: %s\n\nContent:", fileModeToRWXString(filePerms))
		wr = os.Stdout
	}
	wr.Write([]byte(file.Contents))
	return nil
}
func CmdDownload(databaseName, backupID, filePath string, force bool, id IDb, ip prompts.IPrompts, is services.IServices) error {
	err := ip.PHI()
	if err != nil {
		return err
	}
	if !force {
		if _, err := os.Stat(filePath); err == nil {
			return fmt.Errorf("File already exists at path '%s'. Specify `--force` to overwrite", filePath)
		}
	} else {
		os.Remove(filePath)
	}
	service, err := is.RetrieveByLabel(databaseName)
	if err != nil {
		return err
	}
	if service == nil {
		return fmt.Errorf("Could not find a service with the label \"%s\". You can list services with the \"catalyze services\" command.", databaseName)
	}
	err = id.Download(backupID, filePath, service)
	if err != nil {
		return err
	}
	logrus.Printf("%s backup downloaded successfully to %s", databaseName, filePath)
	logrus.Printf("You can also view logs for this backup with the \"catalyze db logs %s %s\" command", databaseName, backupID)
	return nil
}
func CmdUpdate(iu IUpdate) error {
	logrus.Println("Checking for available updates...")
	needsUpdate, err := iu.Check()
	if err != nil {
		return err
	}
	// check if we can overwrite exe
	if needsUpdate && (runtime.GOOS == "linux" || runtime.GOOS == "darwin") {
		err = verifyExeDirWriteable()
		if err != nil {
			return err
		}
	}
	if !needsUpdate {
		logrus.Println("You are already running the latest version of the Catalyze CLI")
		return nil
	}
	logrus.Printf("Version %s is available. Updating your CLI...", updater.AutoUpdater.Info.Version)
	err = iu.Update()
	if err != nil {
		return err
	}
	iu.UpdatePods()
	logrus.Printf("Your CLI has been updated to version %s", updater.AutoUpdater.Info.Version)
	return nil
}
// loadConfiguration loads the configuration of the application
func loadConfiguration(app *AppConfig, rdis *RedisConfig, nats *NatsConfig) {
	err := envconfig.Process(ServiceName, app)
	if err != nil {
		log.Panicln(err)
	}
	err = envconfig.Process("redis", rdis)
	if err != nil {
		log.Panicln(err)
	}
	err = envconfig.Process("nats", nats)
	if err != nil {
		log.Panicln(err)
	}
	if len(os.Getenv(KeyLogly)) > 0 {
		log.Printf("Loading Loggly token %s \n", os.Getenv(KeyLogly))
		hook := logrusly.NewLogglyHook(os.Getenv(KeyLogly), app.Host, log.InfoLevel, app.Name)
		log.AddHook(hook)
	}

	log.Println("#### LOADED CONFIG #####")
	log.Printf("REDIS_URI: %s \n", rdis.URI)
	log.Printf("NATS_ENDPOINT: %s \n", nats.Endpoint)
}
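// A sketch of the shape these config structs imply, assuming the
// kelseyhightower/envconfig conventions (only the fields the log lines above
// reveal; the real types may carry more). With the prefix "redis",
// envconfig.Process fills URI from the REDIS_URI environment variable, and
// with the prefix "nats" it fills Endpoint from NATS_ENDPOINT.
type RedisConfig struct {
	URI string
}

type NatsConfig struct {
	Endpoint string
}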
func test14() {
	// test func pointer
	t := &Test14Struct{}
	t.x = 66666
	test14UseFunc(t.printX)
	log.Printf("after run func: %d\n", t.x)
	log.Printf("struct in main: %p\n", &t)
}

// pass the copy of struct to func!!!!
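// The trailing comment is the point of the test: a method value bound to a
// value receiver captures a *copy* of the struct, so mutations inside the
// method are lost. A self-contained sketch (hypothetical counter type, not
// from the source) of the same behavior:
type counter struct{ n int }

func (c counter) incCopy() { c.n++ } // value receiver: mutates a copy

func (c *counter) incPtr() { c.n++ } // pointer receiver: mutates the original

func exampleMethodValues() {
	c := counter{}
	f := c.incCopy // captures a copy of c at this moment
	f()
	fmt.Println(c.n) // 0: only the copy changed
	g := c.incPtr // captures &c
	g()
	fmt.Println(c.n) // 1: c itself changed
}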
func EnableLogfile(logfileName string) *os.File {
	if logfileName == "" {
		log.Printf("logfile is STDOUT")
		return nil
	}
	log.Printf("logfile is %s", logfileName)
	logFile := logfileName
	logfileNameSlice := strings.Split(logfileName, string(filepath.Separator))
	//relative path
	if len(logfileNameSlice) > 1 && logfileNameSlice[0] != "" {
		logFile = CurrDirectory + string(filepath.Separator) + logfileName
	}
	//try to create log folder
	if len(logfileNameSlice) > 1 {
		logfileNameSlice = logfileNameSlice[:len(logfileNameSlice)-1]
		logPath := strings.Join(logfileNameSlice, string(filepath.Separator))
		// MkdirAll creates intermediate directories too; plain Mkdir fails
		// for paths more than one level deep.
		os.MkdirAll(logPath, 0777)
	}
	f, err := os.OpenFile(logFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf("error opening file: %v", err)
	}
	log.SetFormatter(&log.JSONFormatter{})
	log.SetOutput(f)
	Out = f
	return f
}
func RunServerWithConf(conf RWConf) {
	for path, service := range conf.Services {
		err := service.Initialise()
		if err != nil {
			log.Fatalf("Service for path %s could not startup, err=%s", path, err)
		}
	}

	if conf.Env != "local" {
		f, err := os.OpenFile("/var/log/apps/"+conf.ServiceName+"-go-app.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
		if err == nil {
			log.SetOutput(f)
			log.SetFormatter(&log.TextFormatter{DisableColors: true})
		} else {
			log.Fatalf("Failed to initialise log file, %v", err)
		}
		defer f.Close()
	}

	var m http.Handler
	m = router(conf.Services, conf.HealthHandler)
	if conf.EnableReqLog {
		m = httphandlers.TransactionAwareRequestLoggingHandler(log.StandardLogger(), m)
	}
	m = httphandlers.HTTPMetricsHandler(metrics.DefaultRegistry, m)
	http.Handle("/", m)
	log.Printf("listening on %d", conf.Port)
	log.Println(http.ListenAndServe(fmt.Sprintf(":%d", conf.Port), nil).Error())
	log.Printf("exiting on %s", conf.ServiceName)
}
func gen(c *cli.Context) {
	if len(c.Args()) != 1 {
		cli.ShowCommandHelp(c, "gen")
		return
	}
	var srcFile = c.Args()[0]
	if err := GenerateSampleSolution(srcFile); err != nil {
		log.Fatalf("Failed to generate sample solution: %s", err)
	}
	// At this point we know srcFile contains a valid extension
	ext := filepath.Ext(srcFile)[1:]
	settings, err := ReadKeyValueYamlFile(".settings.yml")
	if err != nil {
		log.Printf("Failed to read settings file: %s\n", err)
		settings = make(map[string]interface{})
	}
	settings["lang"] = ext
	settings["src_file"] = filepath.Base(srcFile)
	settingsFile := ".settings.yml"
	if dir := filepath.Dir(srcFile); dir != "" {
		settingsFile = dir + "/" + settingsFile
	}
	if err = WriteKeyValueYamlFile(settingsFile, settings); err != nil {
		log.Printf("Failed to write settings file: %s\n", err)
	}
}
func BuildPipelines(store data.Nodeinfostore, receiver announced.AnnouncedPacketReceiver, pipeEnd func(response data.ParsedResponse)) ([]io.Closer, error) {
	closeables := make([]io.Closer, 0, 2)
	receivePipeline := pipeline.NewReceivePipeline(&pipeline.JsonParsePipe{}, &pipeline.DeflatePipe{})
	processPipe := pipeline.NewProcessPipeline(getProcessPipes(store)...)
	closeables = append(closeables, receivePipeline, processPipe)
	log.Printf("Adding process pipe end")
	go func() {
		processPipe.Dequeue(pipeEnd)
	}()
	log.Printf("Connecting requester to receive pipeline")
	go func() {
		receiver.Receive(func(response announced.Response) {
			receivePipeline.Enqueue(response)
		})
	}()
	log.Printf("Connecting receive to process pipeline")
	// Connect the receive to the process pipeline
	go func() {
		receivePipeline.Dequeue(func(response data.ParsedResponse) {
			processPipe.Enqueue(response)
		})
	}()
	return closeables, nil
}
// DumpLogs dumps logs from a Backup/Restore/Import/Export job to the console
func (d *SDb) DumpLogs(taskType string, job *models.Job, service *models.Service) error {
	logrus.Printf("Retrieving %s logs for job %s...", service.Label, job.ID)
	tempURL, err := d.TempLogsURL(job.ID, service.ID)
	if err != nil {
		return err
	}
	dir, err := ioutil.TempDir("", "")
	if err != nil {
		return err
	}
	encrFile, err := ioutil.TempFile(dir, "")
	if err != nil {
		return err
	}
	resp, err := http.Get(tempURL.URL)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	io.Copy(encrFile, resp.Body)
	encrFile.Close()
	plainFile, err := ioutil.TempFile(dir, "")
	if err != nil {
		return err
	}
	// do we have to close this before calling DecryptFile?
	// or can two processes have a file open simultaneously?
	plainFile.Close()
	if taskType == "backup" {
		logsKey := job.Backup.KeyLogs
		if logsKey == "" {
			logsKey = job.Backup.Key
		}
		err := d.Crypto.DecryptFile(encrFile.Name(), logsKey, job.Backup.IV, plainFile.Name())
		if err != nil {
			return err
		}
	} else if taskType == "restore" {
		logsKey := job.Restore.KeyLogs
		if logsKey == "" {
			logsKey = job.Restore.Key
		}
		err := d.Crypto.DecryptFile(encrFile.Name(), logsKey, job.Restore.IV, plainFile.Name())
		if err != nil {
			return err
		}
	}
	logrus.Printf("-------------------------- Begin %s logs --------------------------", service.Label)
	plainFile, _ = os.Open(plainFile.Name())
	io.Copy(os.Stdout, plainFile)
	plainFile.Close()
	logrus.Printf("--------------------------- End %s logs ---------------------------", service.Label)
	os.Remove(encrFile.Name())
	os.Remove(plainFile.Name())
	os.Remove(dir)
	return nil
}
// udpListen starts listening for udp packets on the configured port.
func (s *Statsd) udpListen() error {
	defer s.wg.Done()
	address, err := net.ResolveUDPAddr("udp", s.Config.Address)
	if err != nil {
		log.Fatalf("ERROR: ResolveUDPAddr - %s", err)
	}
	s.listener, err = net.ListenUDP("udp", address)
	if err != nil {
		log.Fatalf("ERROR: ListenUDP - %s", err)
	}
	log.Println("Statsd listener listening on: ", s.listener.LocalAddr().String())

	buf := make([]byte, UDP_MAX_PACKET_SIZE)
	for {
		select {
		case <-s.done:
			return nil
		default:
			n, _, err := s.listener.ReadFromUDP(buf)
			if err != nil && !strings.Contains(err.Error(), "closed network") {
				log.Printf("ERROR READ: %s\n", err.Error())
				continue
			}
			bufCopy := make([]byte, n)
			copy(bufCopy, buf[:n])
			select {
			case s.in <- bufCopy:
			default:
				log.Printf(dropwarn, string(buf[:n]))
			}
		}
	}
}
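// A client-side sketch for exercising udpListen, assuming the listener is on
// 127.0.0.1:8125 (the address and metric name are illustrative, not from the
// source). Statsd's line protocol is plain text: <bucket>:<value>|<type>.
func exampleSendStatsdMetric() {
	conn, err := net.Dial("udp", "127.0.0.1:8125")
	if err != nil {
		log.Fatalf("dial failed: %s", err)
	}
	defer conn.Close()
	// One counter increment; the listener above copies the datagram into s.in.
	fmt.Fprintf(conn, "app.requests:1|c")
}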
func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) {
	if expected.Usage != actual.Usage {
		logrus.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage)
		t.Fail()
	}
	if expected.MaxUsage != actual.MaxUsage {
		logrus.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage)
		t.Fail()
	}
	for key, expValue := range expected.Stats {
		actValue, ok := actual.Stats[key]
		if !ok {
			logrus.Printf("Expected memory stat key %s not found\n", key)
			t.Fail()
		}
		if expValue != actValue {
			logrus.Printf("Expected memory stat value %d but found %d\n", expValue, actValue)
			t.Fail()
		}
	}
	if expected.Failcnt != actual.Failcnt {
		logrus.Printf("Expected memory failcnt %d but found %d\n", expected.Failcnt, actual.Failcnt)
		t.Fail()
	}
}
func (s server) handleConn(c net.Conn) {
	client := fmt.Sprintf("Client %v", c.RemoteAddr())
	logrus.Printf("%s: Connected", client)
	defer c.Close()
	stats := newLogger(client)
	defer close(stats)
	// for {
	var op [256]byte
	n, err := c.Read(op[:])
	if err != nil {
		logrus.Errorf("%s: failure: %v", client, err)
		return
	}
	switch string(op[:n]) {
	case "bye":
		logrus.Printf("%s: goodbye", client)
		return
	case "upload":
		if err := speedtest.Consume(c, s.buffers, s.buffers, stats); err != nil {
			logrus.Errorf("%s: failed upload: %v", client, err)
			return
		}
	case "download":
		if err := speedtest.Provide(c, s.buffers, s.buffers, stats); err != nil {
			logrus.Errorf("%s: failed download: %v", client, err)
			return
		}
	}
	// }
}
// writeLoop waits for Queries on a channel and writes them immediately to the
// socket.
func (r *Requester) writeLoop() {
	for query := range r.queryChan {
		queryString := query.QueryString
		targetAddr := query.TargetAddr
		if targetAddr == nil {
			targetAddr = announcedAddr
		}
		buf := []byte(queryString)
		count, err := r.unicastConn.WriteTo(buf, targetAddr)
		if count < len(buf) {
			log.WithFields(log.Fields{
				"bytesWritten":  count,
				"bytesExpected": len(buf),
			}).Error("Failed to write all bytes to unicast address")
		}
		if err != nil {
			log.WithFields(log.Fields{
				"multicastGroup": announcedAddr,
				"error":          err,
			}).Error("Error writing to multicast group")
		}
	}
}
func (fh *ForwardHandler) HandleAppEvent(w http.ResponseWriter, body []byte) {
	event, err := events.ParseEvent(body)
	if err != nil {
		w.WriteHeader(500)
		fmt.Fprintln(w, err.Error())
		log.Printf("[ERROR] body generated error: %s", err.Error())
		return
	}

	resp := ""
	respCode := 200
	for _, app := range event.Apps() {
		err = fh.consul.UpdateApp(app)
		if err != nil {
			resp += err.Error() + "\n"
			log.Printf("[ERROR] response generated error: %s", err.Error())
			respCode = 500
		}
	}
	if resp == "" {
		resp = "OK\n"
	}
	w.WriteHeader(respCode)
	fmt.Fprint(w, resp)
}
// SubmitRecord submits a new record into the worker process
func (rpci *RPCHandler) SubmitRecord(t *Record, reply *string) error {
	rpci.r.subscribersLock.RLock()
	defer rpci.r.subscribersLock.RUnlock()
	log.Printf("[receiver] Received: %s", t)
	subscribers := make([]RecordProcessor, 0, len(rpci.r.subscribedRecordProcessors[t.StreamID]))
	for k := range rpci.r.subscribedRecordProcessors[t.StreamID] {
		subscribers = append(subscribers, k)
	}
	err := ProcessMultipleProcessors(subscribers, t)
	if err != nil {
		log.Printf("[ERROR] Processing %s failed: %s", t, err)
		return err
	}
	// if enough time has passed, create a checkpoint in coordination system
	select {
	case <-rpci.r.checkpointTimers[t.StreamID].C:
		log.Printf("[receiver] checkpointing position of stream %s at %s", t.StreamID, t.Timestamp)
		rpci.r.coordinator.CheckpointPosition(t.StreamID, t.Timestamp)
		rpci.r.checkpointTimers[t.StreamID].Reset(time.Second)
	default:
	}
	*reply = "ok"
	log.Printf("[receiver] ACKed: %s", t)
	return nil
}
// Callback handles the oidc/oauth2 callback after a login attempt from the user.
// If the identity provider returned a proof of valid login, the user ID is stored in the session.
// This includes the model lookup and a possible creation for new users.
// The user's last login timestamp is updated.
func (c *AuthController) Callback(successURL string) xhandler.HandlerC {
	return xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
		log.Info("Handler: Callback")
		user, err := c.Provider.Callback(w, r)
		if err != nil {
			log.Printf("Error occurred: %s", err)
			http.Error(w, "Bad Request", http.StatusBadRequest)
			return
		}
		if user == nil {
			log.Printf("Error occurred: uid is nil")
			http.Error(w, "Bad Request", http.StatusBadRequest)
			return
		}
		uuid := c.ProviderName + ":" + user["id"]
		u, err := c.loginUser(uuid, user["name"])
		if err != nil {
			log.Warnf("Could not create new user: %s", err)
			http.Error(w, "Bad Request", http.StatusBadRequest)
			return
		}
		session := ctx.Value("session").(*sessions.Session)
		session.Values["user"] = u.ID
		session.Save(r, w)
		http.Redirect(w, r, successURL, http.StatusFound)
	})
}