// mergeDelta behaves like Merge but returns a gossipData only containing // things that have changed. func (gd gossipData) mergeDelta(od gossipData) gossipData { delta := gossipData{} for k, e := range od { prev, ok := gd[k] if !ok { gd[k] = e delta[k] = e continue } pts, err := ptypes.Timestamp(prev.Entry.Timestamp) if err != nil { // TODO(fabxc): log error and skip entry. What can actually error here? panic(err) } ets, err := ptypes.Timestamp(e.Entry.Timestamp) if err != nil { // TODO(fabxc): see above. panic(err) } if pts.Before(ets) { gd[k] = e delta[k] = e } } return delta }
func validateSilence(s *pb.Silence) error { if s.Id == "" { return errors.New("ID missing") } if len(s.Matchers) == 0 { return errors.New("at least one matcher required") } for i, m := range s.Matchers { if err := validateMatcher(m); err != nil { return fmt.Errorf("invalid label matcher %d: %s", i, err) } } startsAt, err := ptypes.Timestamp(s.StartsAt) if err != nil { return fmt.Errorf("invalid start time: %s", err) } endsAt, err := ptypes.Timestamp(s.EndsAt) if err != nil { return fmt.Errorf("invalid end time: %s", err) } if endsAt.Before(startsAt) { return errors.New("end time must not be before start time") } if _, err := ptypes.Timestamp(s.UpdatedAt); err != nil { return fmt.Errorf("invalid update timestamp: %s", err) } return nil }
func pageCrawlHistory(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/html") pkg := strings.ToLower(r.FormValue("id")) db := getDatabase() hi := db.PackageCrawlHistory(pkg) if hi == nil { pageNotFound(w, r) return } type Event struct { Time time.Time Action string } events := make([]Event, 0, len(hi.Events)) for _, e := range hi.Events { t, _ := ptypes.Timestamp(e.Timestamp) events = append(events, Event{ Time: t, Action: e.Action.String(), }) } var foundTm, succTm, failedTm *time.Time if hi.FoundTime != nil { foundTm = &time.Time{} *foundTm, _ = ptypes.Timestamp(hi.FoundTime) } if hi.LatestSuccess != nil { succTm := &time.Time{} *succTm, _ = ptypes.Timestamp(hi.LatestSuccess) } if hi.LatestFailed != nil { failedTm := &time.Time{} *failedTm, _ = ptypes.Timestamp(hi.LatestFailed) } if err := templates.ExecuteTemplate(w, "crawlhistory.html", struct { UIUtils FoundTime *time.Time FoundWay string LatestSuccess *time.Time LatestFailed *time.Time Events []Event }{ FoundTime: foundTm, FoundWay: hi.FoundWay, LatestSuccess: succTm, LatestFailed: failedTm, Events: events, }); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } }
func folderInfoAvailable(info *sppb.FolderInfo) bool { if info == nil { return false } t, _ := ptypes.Timestamp(info.CrawlingTime) return t.After(time.Now().Add(-maxFolderInfoDue)) }
func (s *apiServer) Events(r *types.EventsRequest, stream types.API_EventsServer) error { t := time.Time{} if r.Timestamp != nil { from, err := ptypes.Timestamp(r.Timestamp) if err != nil { return err } t = from } events := s.sv.Events.Events(t) defer s.sv.Events.Unsubscribe(events) for e := range events { tsp, err := ptypes.TimestampProto(e.Timestamp) if err != nil { return err } if err := stream.Send(&types.Event{ Id: e.ID, Type: e.Type, Timestamp: tsp, Pid: e.PID, Status: uint32(e.Status), }); err != nil { return err } } return nil }
func (s *apiServer) Events(r *types.EventsRequest, stream types.API_EventsServer) error { t := time.Time{} if r.Timestamp != nil { from, err := ptypes.Timestamp(r.Timestamp) if err != nil { return err } t = from } if r.StoredOnly && t.IsZero() { return fmt.Errorf("invalid parameter: StoredOnly cannot be specified without setting a valid Timestamp") } events := s.sv.Events(t, r.StoredOnly, r.Id) defer s.sv.Unsubscribe(events) for e := range events { tsp, err := ptypes.TimestampProto(e.Timestamp) if err != nil { return err } if r.Id == "" || e.ID == r.Id { if err := stream.Send(&types.Event{ Id: e.ID, Type: e.Type, Timestamp: tsp, Pid: e.PID, Status: uint32(e.Status), }); err != nil { return err } } } return nil }
func waitForExit(c types.APIClient, events types.API_EventsClient, id, pid string, closer func()) { timestamp := time.Now() for { e, err := events.Recv() if err != nil { if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc { closer() os.Exit(128 + int(syscall.SIGHUP)) } time.Sleep(1 * time.Second) tsp, err := ptypes.TimestampProto(timestamp) if err != nil { closer() fmt.Fprintf(os.Stderr, "%s", err.Error()) os.Exit(1) } events, _ = c.Events(netcontext.Background(), &types.EventsRequest{Timestamp: tsp}) continue } timestamp, err = ptypes.Timestamp(e.Timestamp) if e.Id == id && e.Type == "exit" && e.Pid == pid { closer() os.Exit(int(e.Status)) } } }
func containerEvents(c types.APIClient, container string) <-chan *types.Event { evChan := make(chan *types.Event) ts := time.Now() go func() { for { tsp, err := ptypes.TimestampProto(ts) if err != nil { close(evChan) return } events, err := c.Events(netcontext.Background(), &types.EventsRequest{Timestamp: tsp}) if err != nil { fmt.Printf("c.Events error: %v", err) // TODO try to find a way to kill the process ? close(evChan) return } for { e, err := events.Recv() if err != nil { time.Sleep(1 * time.Second) break } ts, err = ptypes.Timestamp(e.Timestamp) if e.Id == container { evChan <- e } } } }() return evChan }
func mustTimestamp(ts *timestamp.Timestamp) time.Time { res, err := ptypes.Timestamp(ts) if err != nil { panic(err) } return res }
func pbEntryToEntry(pbEntry *protologpb.Entry) (*Entry, error) { contexts, err := entryMessagesToMessages(pbEntry.Context) if err != nil { return nil, err } event, err := entryMessageToMessage(pbEntry.Event) if err != nil { return nil, err } level, ok := pbToLevel[pbEntry.Level] if !ok { return nil, fmt.Errorf("protolog: unknown level: %v", pbEntry.Level) } t, err := ptypes.Timestamp(pbEntry.Timestamp) if err != nil { return nil, err } return &Entry{ ID: pbEntry.Id, Level: level, Time: t, Contexts: contexts, Fields: pbEntry.Fields, Event: event, Message: pbEntry.Message, WriterOutput: pbEntry.WriterOutput, }, nil }
// stats fetches current resource-usage statistics for a running container
// from containerd and converts them into the API's StatsJSON form. It returns
// errNotRunning when the container is not running, and propagates any error
// from containerd or from the timestamp conversion.
func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
	if !c.IsRunning() {
		return nil, errNotRunning{c.ID}
	}
	stats, err := daemon.containerd.Stats(c.ID)
	if err != nil {
		return nil, err
	}
	s := &types.StatsJSON{}
	cgs := stats.CgroupStats
	if cgs != nil {
		// Copy block-I/O counters entry by entry into the API types.
		s.BlkioStats = types.BlkioStats{
			IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive),
			IoServicedRecursive:     copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive),
			IoQueuedRecursive:       copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive),
			IoServiceTimeRecursive:  copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive),
			IoWaitTimeRecursive:     copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive),
			IoMergedRecursive:       copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive),
			IoTimeRecursive:         copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive),
			SectorsRecursive:        copyBlkioEntry(cgs.BlkioStats.SectorsRecursive),
		}
		cpu := cgs.CpuStats
		s.CPUStats = types.CPUStats{
			CPUUsage: types.CPUUsage{
				TotalUsage:        cpu.CpuUsage.TotalUsage,
				PercpuUsage:       cpu.CpuUsage.PercpuUsage,
				UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode,
				UsageInUsermode:   cpu.CpuUsage.UsageInUsermode,
			},
			ThrottlingData: types.ThrottlingData{
				Periods:          cpu.ThrottlingData.Periods,
				ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods,
				ThrottledTime:    cpu.ThrottlingData.ThrottledTime,
			},
		}
		mem := cgs.MemoryStats.Usage
		s.MemoryStats = types.MemoryStats{
			Usage:    mem.Usage,
			MaxUsage: mem.MaxUsage,
			Stats:    cgs.MemoryStats.Stats,
			Failcnt:  mem.Failcnt,
			Limit:    mem.Limit,
		}
		// if the container does not set memory limit, use the machineMemory
		if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 {
			s.MemoryStats.Limit = daemon.statsCollector.machineMemory
		}
		if cgs.PidsStats != nil {
			s.PidsStats = types.PidsStats{
				Current: cgs.PidsStats.Current,
			}
		}
	}
	// Record when the sample was taken; this only fails on an invalid
	// protobuf timestamp from containerd.
	s.Read, err = ptypes.Timestamp(stats.Timestamp)
	if err != nil {
		return nil, err
	}
	return s, nil
}
func CheckPackageStatus(pkg *stpb.PackageInfo, repo *sppb.RepoInfo) PackageStatus { if pkg.CrawlingInfo == nil { return OutOfDate } ct, _ := ptypes.Timestamp(pkg.CrawlingInfo.CrawlingTime) if repoInfoAvailable(repo) { lu, _ := ptypes.Timestamp(repo.LastUpdated) if lu.After(ct) { return OutOfDate } return UpToDate } if ct.After(time.Now().Add(-maxPackageInfoDue)) { return UpToDate } return OutOfDate }
// Merge the silence set with gossip data and return a new silence state. func (gd gossipData) Merge(other mesh.GossipData) mesh.GossipData { for id, s := range other.(gossipData) { prev, ok := gd[id] if !ok { gd[id] = s continue } pts, err := ptypes.Timestamp(prev.Silence.UpdatedAt) if err != nil { panic(err) } sts, err := ptypes.Timestamp(s.Silence.UpdatedAt) if err != nil { panic(err) } if pts.Before(sts) { gd[id] = s } } return gd }
func silenceFromProto(s *silencepb.Silence) (*types.Silence, error) { startsAt, err := ptypes.Timestamp(s.StartsAt) if err != nil { return nil, err } endsAt, err := ptypes.Timestamp(s.EndsAt) if err != nil { return nil, err } updatedAt, err := ptypes.Timestamp(s.UpdatedAt) if err != nil { return nil, err } sil := &types.Silence{ ID: s.Id, StartsAt: startsAt, EndsAt: endsAt, UpdatedAt: updatedAt, } for _, m := range s.Matchers { matcher := &types.Matcher{ Name: m.Name, Value: m.Pattern, } switch m.Type { case silencepb.Matcher_EQUAL: case silencepb.Matcher_REGEXP: matcher.IsRegex = true default: return nil, fmt.Errorf("unknown matcher type") } sil.Matchers = append(sil.Matchers, matcher) } if len(s.Comments) > 0 { sil.CreatedBy = s.Comments[0].Author sil.Comment = s.Comments[0].Comment } return sil, nil }
// Merge the notification set with gossip data and return a new notification // state. // TODO(fabxc): can we just return the receiver. Does it have to remain // unmodified. Needs to be clarified upstream. func (gd gossipData) Merge(other mesh.GossipData) mesh.GossipData { for k, e := range other.(gossipData) { prev, ok := gd[k] if !ok { gd[k] = e continue } pts, err := ptypes.Timestamp(prev.Entry.Timestamp) if err != nil { // TODO(fabxc): log error and skip entry. What can actually error here? panic(err) } ets, err := ptypes.Timestamp(e.Entry.Timestamp) if err != nil { // TODO(fabxc): see above. panic(err) } if pts.Before(ets) { gd[k] = e } } return gd }
// mergeDelta behaves like Merge but returns a gossipData only // containing things that have changed. func (gd gossipData) mergeDelta(od gossipData) gossipData { delta := gossipData{} for id, s := range od { prev, ok := gd[id] if !ok { gd[id] = s delta[id] = s continue } pts, err := ptypes.Timestamp(prev.Silence.UpdatedAt) if err != nil { panic(err) } sts, err := ptypes.Timestamp(s.Silence.UpdatedAt) if err != nil { panic(err) } if pts.Before(sts) { gd[id] = s delta[id] = s } } return delta }
func toMessage(resp *pb.ReceivedMessage) (*Message, error) { if resp.Message == nil { return &Message{ackID: resp.AckId}, nil } pubTime, err := ptypes.Timestamp(resp.Message.PublishTime) if err != nil { return nil, err } return &Message{ ackID: resp.AckId, Data: resp.Message.Data, Attributes: resp.Message.Attributes, ID: resp.Message.MessageId, PublishTime: pubTime, }, nil }
// log records a notification entry under a state key derived from the group
// key and receiver. An existing entry is overwritten only when its timestamp
// is not newer than now; the new entry expires at now+retention and is
// broadcast to peers via gossip before being stored locally.
func (l *nlog) log(r *pb.Receiver, gkey, ghash []byte, resolved bool) error {
	// Write all st with the same timestamp.
	now := l.now()
	key := stateKey(gkey, r)

	l.mtx.Lock()
	defer l.mtx.Unlock()

	if prevle, ok := l.st[key]; ok {
		// Entry already exists, only overwrite if timestamp is newer.
		// This may happen with raciness or clock-drift across AM nodes.
		prevts, err := ptypes.Timestamp(prevle.Entry.Timestamp)
		if err != nil {
			return err
		}
		if prevts.After(now) {
			return nil
		}
	}

	ts, err := ptypes.TimestampProto(now)
	if err != nil {
		return err
	}
	expts, err := ptypes.TimestampProto(now.Add(l.retention))
	if err != nil {
		return err
	}

	e := &pb.MeshEntry{
		Entry: &pb.Entry{
			Receiver:  r,
			GroupKey:  gkey,
			GroupHash: ghash,
			Resolved:  resolved,
			Timestamp: ts,
		},
		ExpiresAt: expts,
	}
	// Propagate the new entry to peers, then record it locally.
	l.gossip.GossipBroadcast(gossipData{
		key: e,
	})
	l.st[key] = e

	return nil
}
// GC implements the Log interface. func (l *nlog) GC() (int, error) { now := l.now() var n int l.mtx.Lock() defer l.mtx.Unlock() for k, le := range l.st { if ets, err := ptypes.Timestamp(le.ExpiresAt); err != nil { return n, err } else if !ets.After(now) { delete(l.st, k) n++ } } return n, nil }
func (n *DedupStage) needsUpdate(entry *nflogpb.Entry, hash []byte, resolved bool, repeat time.Duration) (bool, error) { // If we haven't notified about the alert group before, notify right away // unless we only have resolved alerts. if entry == nil { return !resolved, nil } // Check whether the contents have changed. if !bytes.Equal(entry.GroupHash, hash) { return true, nil } // Nothing changed, only notify if the repeat interval has passed. ts, err := ptypes.Timestamp(entry.Timestamp) if err != nil { return false, err } return ts.Before(n.now().Add(-repeat)), nil }
func (r *remote) handleEventStream(events containerd.API_EventsClient) { for { e, err := events.Recv() if err != nil { if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc && r.closeManually { // ignore error if grpc remote connection is closed manually return } logrus.Errorf("libcontainerd: failed to receive event from containerd: %v", err) go r.startEventsMonitor() return } logrus.Debugf("libcontainerd: received containerd event: %#v", e) var container *container var c *client r.RLock() for _, c = range r.clients { container, err = c.getContainer(e.Id) if err == nil { break } } r.RUnlock() if container == nil { logrus.Warnf("libcontainerd: unknown container %s", e.Id) continue } if err := container.handleEvent(e); err != nil { logrus.Errorf("libcontainerd: error processing state change for %s: %v", e.Id, err) } tsp, err := ptypes.Timestamp(e.Timestamp) if err != nil { logrus.Errorf("libcontainerd: failed to convert event timestamp: %q", err) continue } r.updateEventTimestamp(tsp) } }
func fromLogEntry(le *logpb.LogEntry) (*logging.Entry, error) { time, err := ptypes.Timestamp(le.Timestamp) if err != nil { return nil, err } var payload interface{} switch x := le.Payload.(type) { case *logpb.LogEntry_TextPayload: payload = x.TextPayload case *logpb.LogEntry_ProtoPayload: var d ptypes.DynamicAny if err := ptypes.UnmarshalAny(x.ProtoPayload, &d); err != nil { return nil, fmt.Errorf("logging: unmarshalling proto payload: %v", err) } payload = d.Message case *logpb.LogEntry_JsonPayload: // Leave this as a Struct. // TODO(jba): convert to map[string]interface{}? payload = x.JsonPayload default: return nil, fmt.Errorf("logging: unknown payload type: %T", le.Payload) } hr, err := toHTTPRequest(le.HttpRequest) if err != nil { return nil, err } return &logging.Entry{ Timestamp: time, Severity: logging.Severity(le.Severity), Payload: payload, Labels: le.Labels, InsertID: le.InsertId, HTTPRequest: hr, Operation: le.Operation, LogName: slashUnescaper.Replace(le.LogName), Resource: le.Resource, }, nil }
// GC implements the Log interface. func (l *nlog) GC() (int, error) { start := time.Now() defer func() { l.metrics.gcDuration.Observe(time.Since(start).Seconds()) }() now := l.now() var n int l.mtx.Lock() defer l.mtx.Unlock() for k, le := range l.st { if ets, err := ptypes.Timestamp(le.ExpiresAt); err != nil { return n, err } else if !ets.After(now) { delete(l.st, k) n++ } } return n, nil }
func (s *Silences) setSilence(sil *pb.Silence) error { endsAt, err := ptypes.Timestamp(sil.EndsAt) if err != nil { return err } expiresAt, err := ptypes.TimestampProto(endsAt.Add(s.retention)) if err != nil { return err } msil := &pb.MeshSilence{ Silence: sil, ExpiresAt: expiresAt, } st := gossipData{sil.Id: msil} s.st.Merge(st) s.gossip.GossipBroadcast(st) return nil }
func RepoInfoAge(r *sppb.RepoInfo) time.Duration { t, _ := ptypes.Timestamp(r.CrawlingTime) return time.Now().Sub(t) }
t = from } tsp, err := ptypes.TimestampProto(t) if err != nil { fatal(err.Error(), 1) } events, err := c.Events(netcontext.Background(), &types.EventsRequest{ Timestamp: tsp, }) if err != nil { fatal(err.Error(), 1) } w := tabwriter.NewWriter(os.Stdout, 31, 1, 1, ' ', 0) fmt.Fprint(w, "TIME\tTYPE\tID\tPID\tSTATUS\n") w.Flush() for { e, err := events.Recv() if err != nil { fatal(err.Error(), 1) } t, err := ptypes.Timestamp(e.Timestamp) if err != nil { fmt.Fprintf(os.Stderr, "Unable to convert timestamp") t = time.Time{} } fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\n", t.Format(time.RFC3339Nano), e.Type, e.Id, e.Pid, e.Status) w.Flush() } }, }