// GetToken builds a set of standard JWT claims for the given username and
// returns them signed with the service's RS384 mint key.
func GetToken(username string) string {
	// If the configuration has changed, re-load the keys
	if confVersion != util.GetConfig().Version {
		loadMintKey()
	}
	claims := jwt.StandardClaims{
		Issuer:    "FoxAuthn",
		Subject:   username,
		IssuedAt:  time.Now().Unix(),
		ExpiresAt: time.Now().Add(time.Duration(util.GetConfig().Authn.TokenTTL) * time.Second).Unix(),
	}
	log.WithFields(log.Fields{
		"claims": claims,
	}).Debug("Going to sign with these claims")
	token := jwt.NewWithClaims(jwt.SigningMethodRS384, claims)
	ss, err := token.SignedString(GetKey())
	if err != nil {
		log.WithFields(log.Fields{
			"path": mint.From,
		}).Panic("Failed to create signed token: ", err)
	}
	return ss
}
func sshClientActivity(index int) {
	sc := sshConns[index]

	s := rand.NewSource(time.Now().UnixNano())
	r := rand.New(s)

	// generate a random byte slice
	l := r.Intn(128)
	b := make([]byte, l)
	for i := range b {
		b[i] = byte(r.Int())
	}
	data := base64.StdEncoding.EncodeToString(b)

	log.Debug("ssh activity to %v with %v", sc.Host, data)

	start := time.Now().UnixNano()

	sc.Stdin.Write([]byte(data))
	sc.Stdin.Write([]byte{'\r', '\n'})
	sshReportChan <- uint64(len(data))

	expected := fmt.Sprintf("> %v\r\n%v\r\n> ", data, data)
	for i := 0; i < 10 && sc.StdoutBuf.String() != expected; i++ {
		time.Sleep(100 * time.Millisecond)
	}

	stop := time.Now().UnixNano()
	log.Info("ssh %v %vns", sc.Host, uint64(stop-start))

	log.Debugln("ssh: ", sc.StdoutBuf.String())

	sc.StdoutBuf.Reset()
}
func newRepo(name string, simpleDocker SimpleDocker.SimpleDocker) (Repo, error) {
	log.WithFields(log.Fields{
		"name": name,
	}).Info("Creating new repo")
	r := Repo{
		Name:         name,
		SimpleDocker: simpleDocker,
	}

	startTime := time.Now()
	repoName := fmt.Sprintf("ihsw/%s", name)
	if err := r.pullImage(repoName); err != nil {
		log.WithFields(log.Fields{
			"name":     name,
			"err":      err.Error(),
			"repoName": repoName,
		}).Warn("Could not pull image")
		return Repo{}, err
	}

	log.WithFields(log.Fields{
		"name":     name,
		"duration": fmt.Sprintf("%v", time.Since(startTime)),
	}).Info("Repo create success")
	return r, nil
}
func work(c *replicant.Client, C <-chan time.Time, stop chan bool, done chan bool, dl *ygor.DataLogger) {
	defer func() { done <- true }()
	for {
		select {
		case <-C:
		case <-stop:
			return
		}
		start := time.Now()
		_, err := c.Call("echo", "echo", []byte("hello world"), 0)
		end := time.Now()
		if err.Status == replicant.SUCCESS {
			when := uint64(end.UnixNano())
			data := uint64(end.Sub(start).Nanoseconds())
			er := dl.Record(1, when, data)
			if er != nil {
				fmt.Printf("error: %s\n", er)
				os.Exit(1)
			}
		} else {
			fmt.Printf("error: %s\n", err)
			os.Exit(1)
		}
	}
}
// receive from leader
func (self *NsqdCoordRpcServer) PutMessage(info *RpcPutMessage) *CoordErr {
	if self.nsqdCoord.enableBenchCost || coordLog.Level() >= levellogger.LOG_DEBUG {
		s := time.Now()
		defer func() {
			e := time.Now()
			if e.Sub(s) > time.Second*time.Duration(RPC_TIMEOUT/2) {
				coordLog.Infof("PutMessage rpc call used: %v, start: %v, end: %v", e.Sub(s), s, e)
			}
			coordLog.Warningf("PutMessage rpc call used: %v, start: %v, end: %v", e.Sub(s), s, e)
		}()
	}
	var ret CoordErr
	defer coordErrStats.incCoordErr(&ret)
	tc, err := self.nsqdCoord.checkWriteForRpcCall(info.RpcTopicData)
	if err != nil {
		ret = *err
		return &ret
	}
	// do local pub message
	err = self.nsqdCoord.putMessageOnSlave(tc, info.LogData, info.TopicMessage)
	if err != nil {
		ret = *err
		return &ret
	}
	return &ret
}
func DefaultHandler(w http.ResponseWriter, r *http.Request) {
	log.Println("Serving index.html to ", r.RemoteAddr)

	tmpl, err := template.ParseFiles("templates/index.html")
	if err != nil {
		Write404(w, r)
		log.Println("error loading index.html!")
		return
	}

	if client, ok := clientList[r.RemoteAddr]; ok {
		client.LastUpdate = time.Now()
		clientList[r.RemoteAddr] = client
	} else {
		mutex.Lock()
		client := clientInfo{
			NodeID:     "SC-0.1-" + strconv.Itoa(GNodeID),
			Address:    net.ParseIP(r.RemoteAddr),
			LastUpdate: time.Now(),
		}
		GNodeID++
		mutex.Unlock()
		clientList[r.RemoteAddr] = client
	}

	tmpl.Execute(w, struct {
		NodeID   string
		NumNodes int
	}{clientList[r.RemoteAddr].NodeID, len(clientList)})
}
// Ping sends a ping frame across the connection and
// returns the response time
func (s *Connection) Ping() (time.Duration, error) {
	pid := s.pingId
	s.pingIdLock.Lock()
	if s.pingId > 0x7ffffffe {
		s.pingId = s.pingId - 0x7ffffffe
	} else {
		s.pingId = s.pingId + 2
	}
	s.pingIdLock.Unlock()
	pingChan := make(chan error)
	s.pingChans[pid] = pingChan
	defer delete(s.pingChans, pid)

	frame := &spdy.PingFrame{Id: pid}
	startTime := time.Now()
	writeErr := s.framer.WriteFrame(frame)
	if writeErr != nil {
		return time.Duration(0), writeErr
	}
	select {
	case <-s.closeChan:
		return time.Duration(0), errors.New("connection closed")
	case err, ok := <-pingChan:
		if ok && err != nil {
			return time.Duration(0), err
		}
	}

	return time.Since(startTime), nil
}
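// pingOnce is a hypothetical helper (not part of the original snippet)
// showing one way Ping might be called on an established *Connection; the
// standard-library log package is assumed here purely for illustration.
func pingOnce(conn *Connection) {
	if rtt, err := conn.Ping(); err != nil {
		log.Printf("spdy ping failed: %v", err)
	} else {
		log.Printf("spdy ping round-trip: %v", rtt)
	}
}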
// New Go Routine based server with auth
func RunServerWithAuth(opts *server.Options, auth server.Auth) *server.Server {
	if opts == nil {
		opts = &DefaultTestOptions
	}
	s := server.New(opts)
	if s == nil {
		panic("No NATS Server object returned.")
	}

	if auth != nil {
		s.SetAuthMethod(auth)
	}

	// Run server in Go routine.
	go s.Start()

	end := time.Now().Add(10 * time.Second)
	for time.Now().Before(end) {
		addr := s.Addr()
		if addr == nil {
			// Retry. We might take a little while to open a connection.
			time.Sleep(50 * time.Millisecond)
			continue
		}
		conn, err := net.Dial("tcp", addr.String())
		if err != nil {
			// Retry after 50ms
			time.Sleep(50 * time.Millisecond)
			continue
		}
		conn.Close()
		return s
	}
	panic("Unable to start NATS Server in Go Routine")
}
func (b *Broker) Loop(decorate bool) {
	b.start = time.Now()

	if decorate {
		go b.SendNoLog(Response{
			Token: "margo.hello",
			Data: M{
				"time": b.start.String(),
			},
		})
	}

	for {
		stopLooping := b.accept()
		if stopLooping {
			break
		}
		runtime.Gosched()
	}

	if decorate {
		b.SendNoLog(Response{
			Token: "margo.bye-ni",
			Data: M{
				"served": b.served,
				"uptime": time.Since(b.start).String(),
			},
		})
	}
}
func controlService(name string, c svc.Cmd, to svc.State) error {
	m, err := mgr.Connect()
	if err != nil {
		return err
	}
	defer m.Disconnect()
	s, err := m.OpenService(name)
	if err != nil {
		return fmt.Errorf("could not access service: %v", err)
	}
	defer s.Close()
	status, err := s.Control(c)
	if err != nil {
		return fmt.Errorf("could not send control=%d: %v", c, err)
	}
	timeout := time.Now().Add(10 * time.Second)
	for status.State != to {
		if timeout.Before(time.Now()) {
			return fmt.Errorf("timeout waiting for service to go to state=%d", to)
		}
		time.Sleep(300 * time.Millisecond)
		status, err = s.Query()
		if err != nil {
			return fmt.Errorf("could not retrieve service status: %v", err)
		}
	}
	return nil
}
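// stopService is an illustrative wrapper (an assumption, not part of the
// original snippet) showing how controlService might be used with the
// svc.Cmd / svc.State constants from golang.org/x/sys/windows/svc.
func stopService(name string) error {
	return controlService(name, svc.Stop, svc.Stopped)
}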
// UpdateActivation updates one build with the given id
// to the given activation setting.
func UpdateActivation(buildId string, active bool, caller string) error {
	var err error
	if !active && evergreen.IsSystemActivator(caller) {
		_, err = UpdateAllBuilds(
			bson.M{
				IdKey:          buildId,
				ActivatedByKey: caller,
			},
			bson.M{
				"$set": bson.M{
					ActivatedKey:     active,
					ActivatedTimeKey: time.Now(),
					ActivatedByKey:   caller,
				},
			},
		)
	} else {
		_, err = UpdateAllBuilds(
			bson.M{IdKey: buildId},
			bson.M{
				"$set": bson.M{
					ActivatedKey:     active,
					ActivatedTimeKey: time.Now(),
					ActivatedByKey:   caller,
				},
			},
		)
	}
	return err
}
func TestChunkerWithRandomPolynomial(t *testing.T) {
	// setup data source
	buf := getRandom(23, 32*1024*1024)

	// generate a new random polynomial
	start := time.Now()
	p, err := RandomPolynomial()
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("generating random polynomial took %v", time.Since(start))

	start = time.Now()
	ch := New(bytes.NewReader(buf), p)
	t.Logf("creating chunker took %v", time.Since(start))

	// make sure that first chunk is different
	c, err := ch.Next(nil)
	if err != nil {
		t.Fatal(err.Error())
	}

	if c.Cut == chunks1[0].CutFP {
		t.Fatal("Cut point is the same")
	}
	if c.Length == chunks1[0].Length {
		t.Fatal("Length is the same")
	}
	if bytes.Equal(hashData(c.Data), chunks1[0].Digest) {
		t.Fatal("Digest is the same")
	}
}
func (t *timeoutTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	if time.Now().After(t.Timeout) {
		t.Transport.CloseIdleConnections()
		t.Timeout = time.Now().Add(time.Minute * 5)
	}
	return t.Transport.RoundTrip(r)
}
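// newTimeoutClient is a minimal sketch of wiring timeoutTransport into an
// http.Client. It assumes the struct's Transport field is an *http.Transport
// and Timeout is a time.Time; the method body above suggests this, but the
// original type definition is not shown here, so treat it as an assumption.
func newTimeoutClient() *http.Client {
	return &http.Client{
		Transport: &timeoutTransport{
			Transport: &http.Transport{},
			Timeout:   time.Now().Add(5 * time.Minute),
		},
	}
}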
func (m *MonDB) SaveState(StartIndex int64) error {
	session, err := m.getSession()
	if err != nil {
		log.Print("DB connection error\n")
		return err
	}
	col := session.DB("").C("state")

	var needInsert bool
	state := MonDBState{}
	err = col.Find(nil).Sort("-updated").One(&state)
	if err != nil {
		needInsert = true
	}

	if !needInsert {
		qs := bson.M{"_id": state.Id}
		change := bson.M{"$set": bson.M{"start_index": StartIndex, "updated": time.Now().UTC()}}
		return col.Update(qs, change)
	}

	state.Id = bson.NewObjectId()
	state.StartIndex = StartIndex
	state.Created = time.Now().UTC()
	state.Updated = state.Created
	return col.Insert(state)
}
func (s *Schedule) Lock(method string) {
	start := time.Now()
	s.mutex.Lock()
	s.mutexAquired = time.Now()
	s.mutexHolder = method
	s.mutexWaitTime = int64(s.mutexAquired.Sub(start) / time.Millisecond) // remember this so we don't have to call put until we leave the critical section.
}
func (c *WebConn) writePump() {
	ticker := time.NewTicker(PING_PERIOD)

	defer func() {
		ticker.Stop()
		c.WebSocket.Close()
	}()

	for {
		select {
		case msg, ok := <-c.Send:
			if !ok {
				c.WebSocket.SetWriteDeadline(time.Now().Add(WRITE_WAIT))
				c.WebSocket.WriteMessage(websocket.CloseMessage, []byte{})
				return
			}

			c.WebSocket.SetWriteDeadline(time.Now().Add(WRITE_WAIT))
			if err := c.WebSocket.WriteJSON(msg); err != nil {
				return
			}

		case <-ticker.C:
			c.WebSocket.SetWriteDeadline(time.Now().Add(WRITE_WAIT))
			if err := c.WebSocket.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
				return
			}
		}
	}
}
func CreateDashboardSnapshot(cmd *m.CreateDashboardSnapshotCommand) error {
	return inTransaction(func(sess *xorm.Session) error {
		// never expires by default (50 years from now)
		var expires = time.Now().Add(time.Hour * 24 * 365 * 50)
		if cmd.Expires > 0 {
			expires = time.Now().Add(time.Second * time.Duration(cmd.Expires))
		}

		snapshot := &m.DashboardSnapshot{
			Key:       cmd.Key,
			DeleteKey: cmd.DeleteKey,
			OrgId:     cmd.OrgId,
			UserId:    cmd.UserId,
			External:  cmd.External,
			Dashboard: cmd.Dashboard,
			Expires:   expires,
			Created:   time.Now(),
			Updated:   time.Now(),
		}

		_, err := sess.Insert(snapshot)
		cmd.Result = snapshot

		return err
	})
}
func (c *WebConn) readPump() {
	defer func() {
		hub.Unregister(c)
		c.WebSocket.Close()
	}()
	c.WebSocket.SetReadLimit(MAX_SIZE)
	c.WebSocket.SetReadDeadline(time.Now().Add(PONG_WAIT))
	c.WebSocket.SetPongHandler(func(string) error {
		c.WebSocket.SetReadDeadline(time.Now().Add(PONG_WAIT))

		go func() {
			if result := <-Srv.Store.User().UpdateLastPingAt(c.UserId, model.GetMillis()); result.Err != nil {
				l4g.Error("Failed to update LastPingAt for user_id=%v, err=%v", c.UserId, result.Err)
			}
		}()

		return nil
	})

	for {
		var msg model.Message
		if err := c.WebSocket.ReadJSON(&msg); err != nil {
			return
		} else {
			msg.TeamId = c.TeamId
			msg.UserId = c.UserId
			PublishAndForget(&msg)
		}
	}
}
func memStats() error {
	memstats := serverCmd.Flags().Lookup("memstats").Value.String()
	if memstats != "" {
		interval, err := time.ParseDuration(serverCmd.Flags().Lookup("meminterval").Value.String())
		if err != nil {
			interval, _ = time.ParseDuration("100ms")
		}

		fileMemStats, err := os.Create(memstats)
		if err != nil {
			return err
		}

		fileMemStats.WriteString("# Time\tHeapSys\tHeapAlloc\tHeapIdle\tHeapReleased\n")

		go func() {
			var stats runtime.MemStats
			start := time.Now().UnixNano()
			for {
				runtime.ReadMemStats(&stats)
				if fileMemStats != nil {
					fileMemStats.WriteString(fmt.Sprintf("%d\t%d\t%d\t%d\t%d\n",
						(time.Now().UnixNano()-start)/1000000,
						stats.HeapSys, stats.HeapAlloc, stats.HeapIdle, stats.HeapReleased))
					time.Sleep(interval)
				} else {
					break
				}
			}
		}()
	}
	return nil
}
func (c *ConsulAlertClient) registerHealthCheck(key string, health *Check) {
	log.Printf(
		"Registering new health check: node=%s, service=%s, check=%s, status=%s",
		health.Node,
		health.ServiceName,
		health.Name,
		health.Status,
	)

	var newStatus Status
	if health.Status == "passing" {
		newStatus = Status{
			Current:          health.Status,
			CurrentTimestamp: time.Now(),
			HealthCheck:      health,
		}
	} else {
		newStatus = Status{
			Pending:          health.Status,
			PendingTimestamp: time.Now(),
			HealthCheck:      health,
		}
	}

	statusData, _ := json.Marshal(newStatus)
	c.api.KV().Put(&consulapi.KVPair{Key: key, Value: statusData}, nil)
}
func clockOut(usr user) error {
	fmt.Printf("Clocking user %s out.\n", usr.ID)
	// Do other checking?
	punch, err := getLastTimepunch(usr.ID)
	if err != nil {
		return err
	}
	// If the last punch exists, has an in, but not an out,
	// complete the punch.
	if !punch.In.Equal(time.Time{}) && punch.Out.Equal(time.Time{}) {
		punch.Out = time.Now()
		punch.Duration = punch.Out.Sub(punch.In)
		err = updatePunch(punch)
		if err != nil {
			return err
		}
	} else {
		// In every other case, we just want to create a new punch.
		err = createPunch(timePunch{UID: usr.ID, Out: time.Now()})
		if err != nil {
			return err
		}
	}
	fmt.Println("Done.")
	return setUserStatus(usr.ID, false)
}
func (self *FlumeClientPool) evict() {
	for self.running {
		select {
		case <-time.After(self.idletime):
			self.mutex.Lock()
			// do connection cleanup when there are no requests in the pool
			if self.checkOutPool.Len() <= 0 {
				// release the lock before skipping this round
				self.mutex.Unlock()
				continue
			}
			for e := self.idlePool.Back(); nil != e; e = e.Prev() {
				idleclient := e.Value.(*IdleClient)
				// close it if it has already expired and the number of active
				// connections exceeds corepoolsize
				isExpired := idleclient.expiredTime.Before(time.Now())
				if isExpired && self.poolSize >= self.corepoolSize {
					idleclient.flumeclient.Destroy()
					idleclient = nil
					self.idlePool.Remove(e)
					// and update the pool's current active count
					self.poolSize--
				} else if isExpired {
					// expired, but the pool is already down to corepoolsize,
					// so just reset the expiry time
					idleclient.expiredTime = time.Now().Add(self.idletime)
				} else {
					// active count is below corepool, so refresh the keep-alive time
					idleclient.expiredTime = time.Now().Add(self.idletime)
				}
			}
			self.mutex.Unlock()
		}
	}
}
func (bq *baseQueue) processOne(clock *hlc.Clock) {
	start := time.Now()
	bq.Lock()
	repl := bq.pop()
	bq.Unlock()
	if repl != nil {
		now := clock.Now()
		if log.V(1) {
			log.Infof("processing replica %s from %s queue...", repl, bq.name)
		}
		// If the queue requires a replica to have the range leader lease in
		// order to be processed, check whether this replica has leader lease
		// and renew or acquire if necessary.
		if bq.impl.needsLeaderLease() {
			// Create a "fake" get request in order to invoke redirectOnOrAcquireLease.
			args := &proto.GetRequest{RequestHeader: proto.RequestHeader{Timestamp: now}}
			if err := repl.redirectOnOrAcquireLeaderLease(nil /* Trace */, args.Header().Timestamp); err != nil {
				if log.V(1) {
					log.Infof("this replica of %s could not acquire leader lease; skipping...", repl)
				}
				return
			}
		}
		if err := bq.impl.process(now, repl); err != nil {
			log.Errorf("failure processing replica %s from %s queue: %s", repl, bq.name, err)
		} else if log.V(2) {
			log.Infof("processed replica %s from %s queue in %s", repl, bq.name, time.Since(start))
		}
	}
}
func main() {
	configFile := flag.String("config", "benchmark_config.sample.toml", "Config file")

	runtime.GOMAXPROCS(runtime.NumCPU())

	flag.Parse()

	data, err := ioutil.ReadFile(*configFile)
	if err != nil {
		panic(err)
	}
	var conf benchmarkConfig
	if _, err := toml.Decode(string(data), &conf); err != nil {
		panic(err)
	}

	logFile, err := os.OpenFile(conf.LogFile, os.O_RDWR|os.O_CREATE, 0660)
	if err != nil {
		panic(fmt.Sprintf("Error opening log file \"%s\": %s", conf.LogFile, err))
	}
	conf.Log = logFile
	defer logFile.Close()

	fmt.Println("Logging benchmark results to ", conf.LogFile)
	logFile.WriteString("Starting benchmark run...\n")

	harness := NewBenchmarkHarness(&conf)

	startTime := time.Now()
	harness.Run()
	elapsed := time.Since(startTime)

	message := fmt.Sprintf("Finished in %.3f seconds\n", elapsed.Seconds())
	fmt.Print(message)
	logFile.WriteString(message)
}
func (self *NsqdCoordRpcServer) PutMessages(info *RpcPutMessages) *CoordErr {
	if coordLog.Level() >= levellogger.LOG_DEBUG {
		s := time.Now().Unix()
		defer func() {
			e := time.Now().Unix()
			if e-s > int64(RPC_TIMEOUT/2) {
				coordLog.Infof("PutMessages rpc call used: %v", e-s)
			}
		}()
	}
	var ret CoordErr
	defer coordErrStats.incCoordErr(&ret)
	tc, err := self.nsqdCoord.checkWriteForRpcCall(info.RpcTopicData)
	if err != nil {
		ret = *err
		return &ret
	}
	// do local pub message
	err = self.nsqdCoord.putMessagesOnSlave(tc, info.LogData, info.TopicMessages)
	if err != nil {
		ret = *err
		return &ret
	}
	return &ret
}
func (self *BenchmarkHarness) handleWrites(s *server) {
	clientConfig := &influxdb.ClientConfig{
		Host:       s.ConnectionString,
		Database:   self.Config.ClusterCredentials.Database,
		Username:   self.Config.ClusterCredentials.User,
		Password:   self.Config.ClusterCredentials.Password,
		IsSecure:   self.Config.ClusterCredentials.IsSecure,
		HttpClient: NewHttpClient(self.Config.ClusterCredentials.Timeout.Duration, self.Config.ClusterCredentials.SkipVerify),
	}
	client, err := influxdb.NewClient(clientConfig)
	if err != nil {
		panic(fmt.Sprintf("Error connecting to server \"%s\": %s", s.ConnectionString, err))
	}

	for {
		write := <-self.writes

		startTime := time.Now()
		err := client.WriteSeries(write.Series)
		microsecondsTaken := time.Since(startTime).Nanoseconds() / 1000

		if err != nil {
			self.reportFailure(&failureResult{write: write, err: err, microseconds: microsecondsTaken})
		} else {
			self.reportSuccess(&successResult{write: write, microseconds: microsecondsTaken})
		}
	}
}
func (this *SrcHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	tmaster := time.Now()
	switch r.Method {
	// Likely faster not to use a map[string]func.
	case "GET":
		if strings.Index(r.URL.Path, common.SrcsPath) != 0 {
			handlerutils.HttpError(w, "Bad path: "+r.URL.Path, http.StatusBadRequest)
			return
		}
		this.getHandler(w, r)
	case "POST":
		if strings.Index(r.URL.Path, common.SrcPath) != 0 {
			handlerutils.HttpError(w, "Bad path: "+r.URL.Path, http.StatusBadRequest)
			return
		}
		src := r.URL.Path[len(common.SrcPath):]
		this.postHandler(w, r, src)
	case "PUT":
		if strings.Index(r.URL.Path, common.SrcPath) != 0 {
			handlerutils.HttpError(w, "Bad path: "+r.URL.Path, http.StatusBadRequest)
			return
		}
		src := r.URL.Path[len(common.SrcPath):]
		this.putHandler(w, r, src)
	default:
		handlerutils.HttpError(w, "Bad method: "+r.Method, http.StatusBadRequest)
		return
	}
	glog.V(2).Infof("PERF: total service time: %v\n", time.Since(tmaster))
}
func Create(userId, objectId, parentCommentId int, comment, objectType string) ObjectComment {
	objectComment := ObjectComment{}
	model.Db.Where("is_deleted = 1").Find(&objectComment)

	if objectComment == (ObjectComment{}) {
		objectComment = ObjectComment{
			ObjectId:   objectId,
			ObjectType: objectType,
			Comment:    comment,
			UserId:     userId,
			ParentId:   parentCommentId,
			CreatedAt:  time.Now(),
			UpdatedAt:  time.Now(),
		}
		model.Db.Create(&objectComment)
	} else {
		objectComment.ObjectId = objectId
		objectComment.ObjectType = objectType
		objectComment.UserId = userId
		objectComment.ParentId = parentCommentId
		objectComment.Comment = comment
		objectComment.IsDeleted = 0
		objectComment.CreatedAt = time.Now()
		objectComment.UpdatedAt = time.Now()
		model.Db.Save(&objectComment)
	}
	return objectComment
}
func handleClient(conn net.Conn) {
	conn.SetDeadline(time.Now().Add(2 * time.Minute))
	request := make([]byte, 128)
	defer conn.Close()
	for {
		read_len, err := conn.Read(request)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error: %s", err)
			break
		}
		fmt.Println(read_len)
		// if read_len == 0 {
		// 	break
		// } else if string(request) == "timestamp" {
		daytime := time.Now().String()
		conn.Write([]byte(daytime))
		// }
	}
	request = make([]byte, 128)
}
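// serveTimestamps is a hypothetical accept loop for handleClient above; the
// listen address and error handling are assumptions for illustration only.
func serveTimestamps(addr string) error {
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			continue
		}
		go handleClient(conn)
	}
}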
func TestCollect(t *testing.T) {
	cm := &collectorManager{}

	firstTime := time.Now().Add(-time.Hour)
	secondTime := time.Now().Add(time.Hour)
	f1 := &fakeCollector{
		nextCollectionTime: firstTime,
	}
	f2 := &fakeCollector{
		nextCollectionTime: secondTime,
	}

	assert := assert.New(t)
	assert.NoError(cm.RegisterCollector(f1))
	assert.NoError(cm.RegisterCollector(f2))

	// First collection, everyone gets collected from.
	nextTime, _, err := cm.Collect()
	assert.Equal(firstTime, nextTime)
	assert.NoError(err)
	assert.Equal(1, f1.collectedFrom)
	assert.Equal(1, f2.collectedFrom)

	f1.nextCollectionTime = time.Now().Add(2 * time.Hour)

	// Second collection, only the one that is ready gets collected from.
	nextTime, _, err = cm.Collect()
	assert.Equal(secondTime, nextTime)
	assert.NoError(err)
	assert.Equal(2, f1.collectedFrom)
	assert.Equal(1, f2.collectedFrom)
}