// touchMeta updates an object's timestamps when necessary and bumps the version // if provided. func touchMeta(meta *api.Meta, version *api.Version) error { // Skip meta update if version is not defined as it means we're applying // from raft or restoring from a snapshot. if version == nil { return nil } now, err := ptypes.TimestampProto(time.Now()) if err != nil { return err } meta.Version = *version // Updated CreatedAt if not defined if meta.CreatedAt == nil { meta.CreatedAt = now } meta.UpdatedAt = now return nil }
// Logs streams the container's log messages to publisher until the log
// stream ends (io.EOF) or an error occurs. It waits for the container to be
// ready before attaching, rate-limits consumption to roughly 10 MB/s, and
// parses each message's leading RFC3339Nano timestamp before publishing.
// Returns an error if the controller is closed, the container is not ready,
// or any read/parse/publish step fails.
func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error {
	if err := r.checkClosed(); err != nil {
		return err
	}

	if err := r.waitReady(ctx); err != nil {
		return errors.Wrap(err, "container not ready for logs")
	}

	rc, err := r.adapter.logs(ctx, options)
	if err != nil {
		return errors.Wrap(err, "failed getting container logs")
	}
	defer rc.Close()

	var (
		// use a rate limiter to keep things under control but also provides some
		// ability coalesce messages.
		limiter = rate.NewLimiter(rate.Every(time.Second), 10<<20) // 10 MB/s
		// msgctx identifies the node/service/task on every published message.
		msgctx = api.LogContext{
			NodeID:    r.task.NodeID,
			ServiceID: r.task.ServiceID,
			TaskID:    r.task.ID,
		}
	)

	brd := bufio.NewReader(rc)
	for {
		// so, message header is 8 bytes, treat as uint64, pull stream off MSB
		var header uint64
		if err := binary.Read(brd, binary.BigEndian, &header); err != nil {
			if err == io.EOF {
				// Clean end of the log stream.
				return nil
			}
			return errors.Wrap(err, "failed reading log header")
		}

		// Most-significant byte of the header is the stream identifier
		// (stdout/stderr); the remaining 7 bytes hold the payload size.
		stream, size := (header>>(7<<3))&0xFF, header & ^(uint64(0xFF)<<(7<<3))

		// limit here to decrease allocation back pressure.
		if err := limiter.WaitN(ctx, int(size)); err != nil {
			return errors.Wrap(err, "failed rate limiter")
		}

		buf := make([]byte, size)
		_, err := io.ReadFull(brd, buf)
		if err != nil {
			return errors.Wrap(err, "failed reading buffer")
		}

		// Timestamp is RFC3339Nano with 1 space after. Lop, parse, publish
		parts := bytes.SplitN(buf, []byte(" "), 2)
		if len(parts) != 2 {
			return fmt.Errorf("invalid timestamp in log message: %v", buf)
		}

		ts, err := time.Parse(time.RFC3339Nano, string(parts[0]))
		if err != nil {
			return errors.Wrap(err, "failed to parse timestamp")
		}

		tsp, err := ptypes.TimestampProto(ts)
		if err != nil {
			return errors.Wrap(err, "failed to convert timestamp")
		}

		if err := publisher.Publish(ctx, api.LogMessage{
			Context:   msgctx,
			Timestamp: tsp,
			Stream:    api.LogStream(stream),
			Data:      parts[1],
		}); err != nil {
			return errors.Wrap(err, "failed to publish log message")
		}
	}
}
// RemoveNode removes a Node referenced by NodeID with the given NodeSpec. // - Returns NotFound if the Node is not found. // - Returns FailedPrecondition if the Node has manager role (and is part of the memberlist) or is not shut down. // - Returns InvalidArgument if NodeID or NodeVersion is not valid. // - Returns an error if the delete fails. func (s *Server) RemoveNode(ctx context.Context, request *api.RemoveNodeRequest) (*api.RemoveNodeResponse, error) { if request.NodeID == "" { return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) } err := s.store.Update(func(tx store.Tx) error { node := store.GetNode(tx, request.NodeID) if node == nil { return grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID) } if node.Spec.Role == api.NodeRoleManager { if s.raft == nil { return grpc.Errorf(codes.FailedPrecondition, "node %s is a manager but cannot access node information from the raft memberlist", request.NodeID) } if member := s.raft.GetMemberByNodeID(request.NodeID); member != nil { return grpc.Errorf(codes.FailedPrecondition, "node %s is a cluster manager and is a member of the raft cluster. It must be demoted to worker before removal", request.NodeID) } } if !request.Force && node.Status.State == api.NodeStatus_READY { return grpc.Errorf(codes.FailedPrecondition, "node %s is not down and can't be removed", request.NodeID) } // lookup the cluster clusters, err := store.FindClusters(tx, store.ByName("default")) if err != nil { return err } if len(clusters) != 1 { return grpc.Errorf(codes.Internal, "could not fetch cluster object") } cluster := clusters[0] removedNode := &api.RemovedNode{ID: node.ID} // Set an expiry time for this RemovedNode if a certificate // exists and can be parsed. 
if len(node.Certificate.Certificate) != 0 { certBlock, _ := pem.Decode(node.Certificate.Certificate) if certBlock != nil { X509Cert, err := x509.ParseCertificate(certBlock.Bytes) if err == nil && !X509Cert.NotAfter.IsZero() { expiry, err := ptypes.TimestampProto(X509Cert.NotAfter) if err == nil { removedNode.Expiry = expiry } } } } cluster.RemovedNodes = append(cluster.RemovedNodes, removedNode) if err := store.UpdateCluster(tx, cluster); err != nil { return err } return store.DeleteNode(tx, request.NodeID) }) if err != nil { return nil, err } return &api.RemoveNodeResponse{}, nil }